Linux Audio

Check our new training course

Embedded Linux training

Mar 31-Apr 8, 2025
Register
Loading...
v4.6
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/i2c.h>
  29#include <linux/slab.h>
  30#include <linux/export.h>
  31#include <linux/notifier.h>
  32#include <linux/reboot.h>
  33#include <drm/drmP.h>
  34#include <drm/drm_atomic_helper.h>
  35#include <drm/drm_crtc.h>
  36#include <drm/drm_crtc_helper.h>
  37#include <drm/drm_edid.h>
  38#include "intel_drv.h"
  39#include <drm/i915_drm.h>
  40#include "i915_drv.h"
  41
  42#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
  43
  44/* Compliance test status bits  */
  45#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
  46#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  47#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  48#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  49
/* Maps a DP link rate (in kHz) to the DPLL divider values that produce it. */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

/* Gen4 (i965-class) DPLL settings for RBR (1.62 GHz) and HBR (2.7 GHz). */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings for PCH-attached DP ports. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL settings. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Link rates (kHz) supported by each source platform family. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
  99
 100/**
 101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 102 * @intel_dp: DP struct
 103 *
 104 * If a CPU or PCH DP output is attached to an eDP panel, this function
 105 * will return true, and false otherwise.
 106 */
 107static bool is_edp(struct intel_dp *intel_dp)
 108{
 109	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 110
 111	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 112}
 113
 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 115{
 116	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 117
 118	return intel_dig_port->base.base.dev;
 119}
 120
 121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 122{
 123	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 124}
 125
 126static void intel_dp_link_down(struct intel_dp *intel_dp);
 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
 130static void vlv_steal_power_sequencer(struct drm_device *dev,
 131				      enum pipe pipe);
 132
 133static unsigned int intel_dp_unused_lane_mask(int lane_count)
 134{
 135	return ~((1 << lane_count) - 1) & 0xf;
 136}
 137
/*
 * Read the sink's max link rate code from the DPCD and validate it,
 * falling back to 1.62 Gbps (with a WARN) on unknown values.
 */
static int
intel_dp_max_link_bw(struct intel_dp  *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
 156
/* Usable lane count: the minimum of what source port and sink support. */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
 167
 168/*
 169 * The units on the numbers in the next two are... bizarre.  Examples will
 170 * make it clearer; this one parallels an example in the eDP spec.
 171 *
 172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 173 *
 174 *     270000 * 1 * 8 / 10 == 216000
 175 *
 176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 177 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 179 * 119000.  At 18bpp that's 2142000 kilobits per second.
 180 *
 181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 182 * get the result in decakilobits instead of kilobits.
 183 */
 184
 185static int
 186intel_dp_link_required(int pixel_clock, int bpp)
 187{
 188	return (pixel_clock * bpp + 9) / 10;
 189}
 190
 191static int
 192intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 193{
 194	return (max_link_clock * max_lanes * 8) / 10;
 195}
 196
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Validate a display mode against the eDP panel's fixed mode (if any),
 * the available link bandwidth at 18bpp, and the platform dotclock limit.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (is_edp(intel_dp) && fixed_mode) {
		/* eDP panels can't display modes larger than the fixed mode. */
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* The panel always runs at its fixed mode's clock. */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Bandwidth check is done at 18bpp, the most permissive case. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Double-clocked modes are not supported on DP. */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
 235
 236uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
 237{
 238	int	i;
 239	uint32_t v = 0;
 240
 241	if (src_bytes > 4)
 242		src_bytes = 4;
 243	for (i = 0; i < src_bytes; i++)
 244		v |= ((uint32_t) src[i]) << ((3-i) * 8);
 245	return v;
 246}
 247
 248static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
 249{
 250	int i;
 251	if (dst_bytes > 4)
 252		dst_bytes = 4;
 253	for (i = 0; i < dst_bytes; i++)
 254		dst[i] = src >> ((3-i) * 8);
 255}
 256
 257static void
 258intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 259				    struct intel_dp *intel_dp);
 260static void
 261intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 262					      struct intel_dp *intel_dp);
 
 
 
 263
/*
 * Acquire pps_mutex while holding an AUX power domain reference.
 * The power domain get must happen before taking pps_mutex (see the
 * deadlock note in vlv_power_sequencer_reset()). Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
 281
/*
 * Release pps_mutex and drop the AUX power domain reference taken in
 * pps_lock(), in the reverse order of acquisition.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
 295
 296static void
 297vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 298{
 299	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 300	struct drm_device *dev = intel_dig_port->base.base.dev;
 301	struct drm_i915_private *dev_priv = dev->dev_private;
 302	enum pipe pipe = intel_dp->pps_pipe;
 303	bool pll_enabled, release_cl_override = false;
 304	enum dpio_phy phy = DPIO_PHY(pipe);
 305	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
 306	uint32_t DP;
 307
 308	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
 309		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
 310		 pipe_name(pipe), port_name(intel_dig_port->port)))
 311		return;
 312
 313	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
 314		      pipe_name(pipe), port_name(intel_dig_port->port));
 315
 316	/* Preserve the BIOS-computed detected bit. This is
 317	 * supposed to be read-only.
 318	 */
 319	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
 320	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 321	DP |= DP_PORT_WIDTH(1);
 322	DP |= DP_LINK_TRAIN_PAT_1;
 323
 324	if (IS_CHERRYVIEW(dev))
 325		DP |= DP_PIPE_SELECT_CHV(pipe);
 326	else if (pipe == PIPE_B)
 327		DP |= DP_PIPEB_SELECT;
 328
 329	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
 330
 331	/*
 332	 * The DPLL for the pipe must be enabled for this to work.
 333	 * So enable temporarily it if it's not already enabled.
 334	 */
 335	if (!pll_enabled) {
 336		release_cl_override = IS_CHERRYVIEW(dev) &&
 337			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 338
 339		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
 340				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
 341			DRM_ERROR("Failed to force on pll for pipe %c!\n",
 342				  pipe_name(pipe));
 343			return;
 344		}
 345	}
 346
 347	/*
 348	 * Similar magic as in intel_dp_enable_port().
 349	 * We _must_ do this port enable + disable trick
 350	 * to make this power seqeuencer lock onto the port.
 351	 * Otherwise even VDD force bit won't work.
 352	 */
 353	I915_WRITE(intel_dp->output_reg, DP);
 354	POSTING_READ(intel_dp->output_reg);
 355
 356	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
 357	POSTING_READ(intel_dp->output_reg);
 358
 359	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
 360	POSTING_READ(intel_dp->output_reg);
 361
 362	if (!pll_enabled) {
 363		vlv_force_pll_off(dev, pipe);
 364
 365		if (release_cl_override)
 366			chv_phy_powergate_ch(dev_priv, phy, ch, false);
 367	}
 368}
 369
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * and initializing a free one (stealing if necessary) when none is bound
 * yet. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	/* Only pipes A and B have panel power sequencers. */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
 432
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 433typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 434			       enum pipe pipe);
 435
/* pipe_check: is panel power currently on for this pipe's PPS? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* pipe_check: is VDD force currently asserted for this pipe's PPS? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* pipe_check: wildcard — accepts any pipe. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
 453
/*
 * Find a pipe (A or B) whose power sequencer has @port selected and
 * satisfies @pipe_check, or INVALID_PIPE if none does.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
 476
/*
 * At init, bind this eDP port to the power sequencer pipe the BIOS left
 * it on, preferring (in order) a pipe with panel power on, then one with
 * VDD on, then any pipe with the port selected. Caller holds pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
 513
/*
 * Invalidate all eDP port -> power sequencer pipe bindings, e.g. after a
 * power well cycle. VLV/CHV only; the next PPS access re-picks a pipe.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
 542
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Panel power control register for this port's PPS, by platform. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		/* VLV/CHV: per-pipe PPS; may pick/init a pipe on first use. */
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

/* Panel power status register for this port's PPS, by platform. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
 568
/*
 * Reboot notifier: on SYS_RESTART, power the panel off with the maximum
 * power-cycle delay so the T12 timing is guaranteed across the reboot.
 * Only applicable when panel PM state is not otherwise tracked
 * (VLV/CHV path below).
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
 604
/* Is panel power on? Caller must hold pps_mutex (PPS register access). */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No PPS pipe bound yet on VLV/CHV -> nothing can be powered. */
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

/* Is VDD force asserted? Caller must hold pps_mutex. */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No PPS pipe bound yet on VLV/CHV -> VDD cannot be on. */
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
 632
/*
 * Sanity check before AUX transfers on eDP: warn if neither panel power
 * nor VDD is on, since AUX would then fail. No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
 649
/*
 * Wait for the AUX channel to go idle (SEND_BUSY cleared), via the AUX
 * done interrupt when available, otherwise by atomic polling. Returns
 * the last status word read; logs an error on timeout.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads status on each evaluation — both wait paths rely on that. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
 673
/*
 * get_aux_clock_divider vfuncs: return the divider to try for attempt
 * @index, or 0 when no more dividers are available.
 */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		/* Port A (CPU eDP) runs the AUX clock off cdclk. */
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);

	} else {
		/* PCH ports run it off the PCH rawclk. */
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}
 724
 725static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 726{
 727	return index ? 0 : 100;
 728}
 729
 730static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 731{
 732	/*
 733	 * SKL doesn't need us to program the AUX clock divider (Hardware will
 734	 * derive the clock from CDCLK automatically). We still implement the
 735	 * get_aux_clock_divider vfunc to plug-in into the existing code.
 736	 */
 737	return index ? 0 : 1;
 738}
 739
/*
 * Build the AUX_CH_CTL value for pre-SKL platforms: precharge time by
 * generation, timeout by platform/port, plus message size and divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW port A uses the longer 600us timeout. */
	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
 769
 770static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 771				      bool has_aux_irq,
 772				      int send_bytes,
 773				      uint32_t unused)
 774{
 775	return DP_AUX_CH_CTL_SEND_BUSY |
 776	       DP_AUX_CH_CTL_DONE |
 777	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 778	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
 779	       DP_AUX_CH_CTL_TIME_OUT_1600us |
 780	       DP_AUX_CH_CTL_RECEIVE_ERROR |
 781	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 
 782	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 783}
 784
/*
 * Perform one raw AUX channel transaction: send @send_bytes bytes from
 * @send, receive up to @recv_size bytes into @recv. Retries across all
 * available AUX clock dividers, up to 5 attempts each. Returns the
 * number of bytes received, or a negative errno (-EBUSY channel stuck or
 * forbidden message size, -E2BIG oversized request, -EIO receive error,
 * -ETIMEDOUT sink didn't respond). Takes pps_lock for the duration.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* WARN only once per distinct stuck status, to limit log spam. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer at each available AUX clock divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
 956
 957#define BARE_ADDRESS_SIZE	3
 958#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Pack the 4-byte AUX request header: request type in the high
	 * nibble, 20-bit address, then (length - 1). */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-sized write is an address-only transaction and
		 * omits the length byte. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* Reply code is the high nibble of the first byte. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1; /* one reply byte + payload */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1031
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1032static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1033				       enum port port)
1034{
1035	switch (port) {
1036	case PORT_B:
1037	case PORT_C:
1038	case PORT_D:
1039		return DP_AUX_CH_CTL(port);
1040	default:
1041		MISSING_CASE(port);
1042		return DP_AUX_CH_CTL(PORT_B);
1043	}
1044}
1045
1046static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1047					enum port port, int index)
1048{
1049	switch (port) {
1050	case PORT_B:
1051	case PORT_C:
1052	case PORT_D:
1053		return DP_AUX_CH_DATA(port, index);
1054	default:
1055		MISSING_CASE(port);
1056		return DP_AUX_CH_DATA(PORT_B, index);
1057	}
1058}
1059
1060static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1061				       enum port port)
1062{
1063	switch (port) {
1064	case PORT_A:
1065		return DP_AUX_CH_CTL(port);
1066	case PORT_B:
1067	case PORT_C:
1068	case PORT_D:
1069		return PCH_DP_AUX_CH_CTL(port);
1070	default:
1071		MISSING_CASE(port);
1072		return DP_AUX_CH_CTL(PORT_A);
1073	}
1074}
1075
1076static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1077					enum port port, int index)
1078{
1079	switch (port) {
1080	case PORT_A:
1081		return DP_AUX_CH_DATA(port, index);
1082	case PORT_B:
1083	case PORT_C:
1084	case PORT_D:
1085		return PCH_DP_AUX_CH_DATA(port, index);
1086	default:
1087		MISSING_CASE(port);
1088		return DP_AUX_CH_DATA(PORT_A, index);
1089	}
1090}
1091
1092/*
1093 * On SKL we don't have Aux for port E so we rely
1094 * on VBT to set a proper alternate aux channel.
1095 */
1096static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1097{
1098	const struct ddi_vbt_port_info *info =
1099		&dev_priv->vbt.ddi_port_info[PORT_E];
1100
1101	switch (info->alternate_aux_channel) {
1102	case DP_AUX_A:
1103		return PORT_A;
1104	case DP_AUX_B:
1105		return PORT_B;
1106	case DP_AUX_C:
1107		return PORT_C;
1108	case DP_AUX_D:
1109		return PORT_D;
1110	default:
1111		MISSING_CASE(info->alternate_aux_channel);
1112		return PORT_A;
1113	}
1114}
1115
1116static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1117				       enum port port)
1118{
1119	if (port == PORT_E)
1120		port = skl_porte_aux_port(dev_priv);
1121
1122	switch (port) {
1123	case PORT_A:
1124	case PORT_B:
1125	case PORT_C:
1126	case PORT_D:
1127		return DP_AUX_CH_CTL(port);
1128	default:
1129		MISSING_CASE(port);
1130		return DP_AUX_CH_CTL(PORT_A);
1131	}
1132}
1133
1134static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1135					enum port port, int index)
1136{
1137	if (port == PORT_E)
1138		port = skl_porte_aux_port(dev_priv);
1139
1140	switch (port) {
1141	case PORT_A:
1142	case PORT_B:
1143	case PORT_C:
1144	case PORT_D:
1145		return DP_AUX_CH_DATA(port, index);
1146	default:
1147		MISSING_CASE(port);
1148		return DP_AUX_CH_DATA(PORT_A, index);
1149	}
1150}
1151
1152static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1153					 enum port port)
1154{
1155	if (INTEL_INFO(dev_priv)->gen >= 9)
1156		return skl_aux_ctl_reg(dev_priv, port);
1157	else if (HAS_PCH_SPLIT(dev_priv))
1158		return ilk_aux_ctl_reg(dev_priv, port);
1159	else
1160		return g4x_aux_ctl_reg(dev_priv, port);
1161}
1162
1163static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1164					  enum port port, int index)
1165{
1166	if (INTEL_INFO(dev_priv)->gen >= 9)
1167		return skl_aux_data_reg(dev_priv, port, index);
1168	else if (HAS_PCH_SPLIT(dev_priv))
1169		return ilk_aux_data_reg(dev_priv, port, index);
1170	else
1171		return g4x_aux_data_reg(dev_priv, port, index);
1172}
1173
/* Cache the AUX CH control and data register offsets for this port so
 * the AUX transfer path doesn't have to look them up on every access. */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1184
/* Tear down the DRM AUX adapter and free the name allocated in
 * intel_dp_aux_init(). Must unregister before freeing the name. */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1191
/*
 * Set up and register the DRM DP AUX adapter for this port.
 *
 * Returns 0 on success, -ENOMEM if the name allocation fails, or the
 * error from drm_dp_aux_register(). On registration failure the name
 * is freed again so no cleanup is required by the caller.
 */
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	/* Register offsets must be cached before any AUX transfer. */
	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	/* dev and transfer must be set before registering the adapter. */
	intel_dp->aux.dev = connector->base.kdev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);
		return ret;
	}

	return 0;
}
1222
/* Connector unregister hook: tear down the AUX adapter before the
 * generic connector unregistration. */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	intel_dp_aux_fini(intel_dp);
	intel_connector_unregister(intel_connector);
}
1231
/*
 * Program the SKL DPLL0 state for an eDP link: select DPLL0 and encode
 * the link rate (port_clock is in 10 kHz units of the symbol clock, so
 * halved here to match the DPLL_CTRL1 link-rate encodings).
 *
 * NOTE(review): there is no default case — an unexpected port_clock
 * leaves only the OVERRIDE bit set in ctrl1; confirm all callers pass
 * one of the listed rates.
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1277
/*
 * Select the HSW/BDW LCPLL frequency matching the DP link rate.
 * port_clock is halved to compare against the standard link rates.
 * Unrecognized rates leave ddi_pll_sel zeroed by the memset.
 */
void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}
1296
1297static int
1298intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1299{
1300	if (intel_dp->num_sink_rates) {
1301		*sink_rates = intel_dp->sink_rates;
1302		return intel_dp->num_sink_rates;
1303	}
1304
1305	*sink_rates = default_rates;
1306
1307	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1308}
1309
1310bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1311{
1312	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1313	struct drm_device *dev = dig_port->base.base.dev;
1314
1315	/* WaDisableHBR2:skl */
1316	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1317		return false;
1318
1319	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1320	    (INTEL_INFO(dev)->gen >= 9))
1321		return true;
1322	else
1323		return false;
1324}
1325
1326static int
1327intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1328{
1329	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1330	struct drm_device *dev = dig_port->base.base.dev;
1331	int size;
1332
1333	if (IS_BROXTON(dev)) {
1334		*source_rates = bxt_rates;
1335		size = ARRAY_SIZE(bxt_rates);
1336	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1337		*source_rates = skl_rates;
1338		size = ARRAY_SIZE(skl_rates);
1339	} else {
1340		*source_rates = default_rates;
1341		size = ARRAY_SIZE(default_rates);
1342	}
1343
1344	/* This depends on the fact that 5.4 is last value in the array */
1345	if (!intel_dp_source_supports_hbr2(intel_dp))
1346		size--;
1347
1348	return size;
1349}
1350
1351static void
1352intel_dp_set_clock(struct intel_encoder *encoder,
1353		   struct intel_crtc_state *pipe_config)
1354{
1355	struct drm_device *dev = encoder->base.dev;
 
1356	const struct dp_link_dpll *divisor = NULL;
1357	int i, count = 0;
1358
1359	if (IS_G4X(dev)) {
1360		divisor = gen4_dpll;
1361		count = ARRAY_SIZE(gen4_dpll);
1362	} else if (HAS_PCH_SPLIT(dev)) {
1363		divisor = pch_dpll;
1364		count = ARRAY_SIZE(pch_dpll);
1365	} else if (IS_CHERRYVIEW(dev)) {
1366		divisor = chv_dpll;
1367		count = ARRAY_SIZE(chv_dpll);
1368	} else if (IS_VALLEYVIEW(dev)) {
1369		divisor = vlv_dpll;
1370		count = ARRAY_SIZE(vlv_dpll);
1371	}
1372
1373	if (divisor && count) {
1374		for (i = 0; i < count; i++) {
1375			if (pipe_config->port_clock == divisor[i].clock) {
1376				pipe_config->dpll = divisor[i].dpll;
1377				pipe_config->clock_set = true;
1378				break;
1379			}
1380		}
1381	}
1382}
1383
1384static int intersect_rates(const int *source_rates, int source_len,
1385			   const int *sink_rates, int sink_len,
1386			   int *common_rates)
1387{
1388	int i = 0, j = 0, k = 0;
1389
1390	while (i < source_len && j < sink_len) {
1391		if (source_rates[i] == sink_rates[j]) {
1392			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1393				return k;
1394			common_rates[k] = source_rates[i];
1395			++k;
1396			++i;
1397			++j;
1398		} else if (source_rates[i] < sink_rates[j]) {
1399			++i;
1400		} else {
1401			++j;
1402		}
1403	}
1404	return k;
1405}
1406
/* Fill common_rates with the rates supported by both source and sink;
 * returns the number of common rates found. */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *src, *snk;
	int nsrc, nsnk;

	nsnk = intel_dp_sink_rates(intel_dp, &snk);
	nsrc = intel_dp_source_rates(intel_dp, &src);

	return intersect_rates(src, nsrc, snk, nsnk, common_rates);
}
1420
/*
 * Format @nelem integers from @array into @str as a comma-separated
 * list, truncating silently (but always NUL-terminated) when the
 * buffer runs out.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	const char *sep = "";
	int idx;

	str[0] = '\0';

	for (idx = 0; idx < nelem; idx++) {
		int written = snprintf(str, len, "%s%d", sep, array[idx]);

		if (written >= len)
			return;

		str += written;
		len -= written;
		sep = ", ";
	}
}
1436
1437static void intel_dp_print_rates(struct intel_dp *intel_dp)
1438{
1439	const int *source_rates, *sink_rates;
1440	int source_len, sink_len, common_len;
1441	int common_rates[DP_MAX_SUPPORTED_RATES];
1442	char str[128]; /* FIXME: too big for stack? */
1443
1444	if ((drm_debug & DRM_UT_KMS) == 0)
1445		return;
1446
1447	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1448	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1449	DRM_DEBUG_KMS("source rates: %s\n", str);
1450
1451	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1452	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1453	DRM_DEBUG_KMS("sink rates: %s\n", str);
1454
1455	common_len = intel_dp_common_rates(intel_dp, common_rates);
1456	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1457	DRM_DEBUG_KMS("common rates: %s\n", str);
1458}
1459
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1460static int rate_to_index(int find, const int *rates)
1461{
1462	int i = 0;
1463
1464	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1465		if (find == rates[i])
1466			break;
1467
1468	return i;
1469}
1470
1471int
1472intel_dp_max_link_rate(struct intel_dp *intel_dp)
1473{
1474	int rates[DP_MAX_SUPPORTED_RATES] = {};
1475	int len;
1476
1477	len = intel_dp_common_rates(intel_dp, rates);
1478	if (WARN_ON(len <= 0))
1479		return 162000;
1480
1481	return rates[rate_to_index(0, rates) - 1];
1482}
1483
/* Map @rate to its index in the sink's rate table; this index is what
 * gets written as the rate-select value for table-based sinks. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1488
1489void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1490			   uint8_t *link_bw, uint8_t *rate_select)
1491{
1492	if (intel_dp->num_sink_rates) {
1493		*link_bw = 0;
1494		*rate_select =
1495			intel_dp_rate_select(intel_dp, port_clock);
1496	} else {
1497		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1498		*rate_select = 0;
1499	}
1500}
1501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1502bool
1503intel_dp_compute_config(struct intel_encoder *encoder,
1504			struct intel_crtc_state *pipe_config)
 
1505{
1506	struct drm_device *dev = encoder->base.dev;
1507	struct drm_i915_private *dev_priv = dev->dev_private;
1508	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1509	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1510	enum port port = dp_to_dig_port(intel_dp)->port;
1511	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1512	struct intel_connector *intel_connector = intel_dp->attached_connector;
1513	int lane_count, clock;
1514	int min_lane_count = 1;
1515	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1516	/* Conveniently, the link BW constants become indices with a shift...*/
1517	int min_clock = 0;
1518	int max_clock;
1519	int bpp, mode_rate;
1520	int link_avail, link_clock;
1521	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1522	int common_len;
1523	uint8_t link_bw, rate_select;
1524
1525	common_len = intel_dp_common_rates(intel_dp, common_rates);
1526
1527	/* No common link rates between source and sink */
1528	WARN_ON(common_len <= 0);
1529
1530	max_clock = common_len - 1;
1531
1532	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1533		pipe_config->has_pch_encoder = true;
1534
1535	pipe_config->has_dp_encoder = true;
1536	pipe_config->has_drrs = false;
1537	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1538
1539	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1540		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1541				       adjusted_mode);
1542
1543		if (INTEL_INFO(dev)->gen >= 9) {
1544			int ret;
1545			ret = skl_update_scaler_crtc(pipe_config);
1546			if (ret)
1547				return ret;
1548		}
1549
1550		if (HAS_GMCH_DISPLAY(dev))
1551			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1552						 intel_connector->panel.fitting_mode);
1553		else
1554			intel_pch_panel_fitting(intel_crtc, pipe_config,
1555						intel_connector->panel.fitting_mode);
1556	}
1557
1558	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1559		return false;
1560
1561	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1562		      "max bw %d pixel clock %iKHz\n",
1563		      max_lane_count, common_rates[max_clock],
1564		      adjusted_mode->crtc_clock);
1565
1566	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1567	 * bpc in between. */
1568	bpp = pipe_config->pipe_bpp;
1569	if (is_edp(intel_dp)) {
1570
1571		/* Get bpp from vbt only for panels that dont have bpp in edid */
1572		if (intel_connector->base.display_info.bpc == 0 &&
1573			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1574			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1575				      dev_priv->vbt.edp_bpp);
1576			bpp = dev_priv->vbt.edp_bpp;
1577		}
1578
1579		/*
1580		 * Use the maximum clock and number of lanes the eDP panel
1581		 * advertizes being capable of. The panels are generally
1582		 * designed to support only a single clock and lane
1583		 * configuration, and typically these values correspond to the
1584		 * native resolution of the panel.
1585		 */
1586		min_lane_count = max_lane_count;
1587		min_clock = max_clock;
1588	}
1589
1590	for (; bpp >= 6*3; bpp -= 2*3) {
1591		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1592						   bpp);
1593
1594		for (clock = min_clock; clock <= max_clock; clock++) {
1595			for (lane_count = min_lane_count;
1596				lane_count <= max_lane_count;
1597				lane_count <<= 1) {
1598
1599				link_clock = common_rates[clock];
1600				link_avail = intel_dp_max_data_rate(link_clock,
1601								    lane_count);
1602
1603				if (mode_rate <= link_avail) {
1604					goto found;
1605				}
1606			}
1607		}
1608	}
1609
1610	return false;
1611
1612found:
1613	if (intel_dp->color_range_auto) {
1614		/*
1615		 * See:
1616		 * CEA-861-E - 5.1 Default Encoding Parameters
1617		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1618		 */
1619		pipe_config->limited_color_range =
1620			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1621	} else {
1622		pipe_config->limited_color_range =
1623			intel_dp->limited_color_range;
1624	}
1625
1626	pipe_config->lane_count = lane_count;
1627
1628	pipe_config->pipe_bpp = bpp;
1629	pipe_config->port_clock = common_rates[clock];
1630
1631	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1632			      &link_bw, &rate_select);
1633
1634	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1635		      link_bw, rate_select, pipe_config->lane_count,
1636		      pipe_config->port_clock, bpp);
1637	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1638		      mode_rate, link_avail);
1639
1640	intel_link_compute_m_n(bpp, lane_count,
1641			       adjusted_mode->crtc_clock,
1642			       pipe_config->port_clock,
1643			       &pipe_config->dp_m_n);
1644
1645	if (intel_connector->panel.downclock_mode != NULL &&
1646		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1647			pipe_config->has_drrs = true;
1648			intel_link_compute_m_n(bpp, lane_count,
1649				intel_connector->panel.downclock_mode->clock,
1650				pipe_config->port_clock,
1651				&pipe_config->dp_m2_n2);
1652	}
1653
1654	if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1655		skl_edp_set_pll_config(pipe_config);
1656	else if (IS_BROXTON(dev))
1657		/* handled in ddi */;
1658	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1659		hsw_dp_set_ddi_pll_sel(pipe_config);
1660	else
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1661		intel_dp_set_clock(encoder, pipe_config);
1662
1663	return true;
1664}
1665
/* Latch the link rate and lane count from the committed CRTC state
 * into the encoder, for later use (e.g. by link training). */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1672
/* Build the DP port register value (intel_dp->DP) for the upcoming
 * enable, accounting for the per-platform register layout. On CPT the
 * sync/framing bits live in TRANS_DP_CTL, which is updated here too. */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU port A: sync polarity in the port register,
		 * pipe select in bit 29. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT: enhanced framing is controlled via TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU (g4x, VLV, CHV) register layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1754
1755#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1756#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
1757
1758#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1759#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
1760
1761#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1762#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1763
 
 
 
/* Poll the panel power sequencer status register until
 * (status & mask) == value, giving up (with an error message, not a
 * return code) after 5 seconds. Caller must hold pps_mutex. */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms timeout, polling every 10 ms. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1790
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1796
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1802
/* Enforce the panel's minimum power-off time (T11/T12) before the
 * next power-on, then wait for the sequencer to go idle. */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1823
/* Honour the panel's delay between power-on and backlight-on (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1829
/* Honour the panel's required delay after backlight-off (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1835
/* Read the current pp_control value, unlocking the register if it
 * is locked. BXT has no register-write-protection scheme, so the
 * unlock bits are only applied on other platforms. Caller must hold
 * pps_mutex.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1855
1856/*
1857 * Must be paired with edp_panel_vdd_off().
1858 * Must hold pps_mutex around the whole on/off sequence.
1859 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1860 */
1861static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1862{
1863	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1864	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1865	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1866	struct drm_i915_private *dev_priv = dev->dev_private;
1867	enum intel_display_power_domain power_domain;
1868	u32 pp;
1869	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1870	bool need_to_disable = !intel_dp->want_panel_vdd;
1871
1872	lockdep_assert_held(&dev_priv->pps_mutex);
1873
1874	if (!is_edp(intel_dp))
1875		return false;
1876
1877	cancel_delayed_work(&intel_dp->panel_vdd_work);
1878	intel_dp->want_panel_vdd = true;
1879
1880	if (edp_have_panel_vdd(intel_dp))
1881		return need_to_disable;
1882
1883	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1884	intel_display_power_get(dev_priv, power_domain);
1885
1886	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1887		      port_name(intel_dig_port->port));
1888
1889	if (!edp_have_panel_power(intel_dp))
1890		wait_panel_power_cycle(intel_dp);
1891
1892	pp = ironlake_get_pp_control(intel_dp);
1893	pp |= EDP_FORCE_VDD;
1894
1895	pp_stat_reg = _pp_stat_reg(intel_dp);
1896	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1897
1898	I915_WRITE(pp_ctrl_reg, pp);
1899	POSTING_READ(pp_ctrl_reg);
1900	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1901			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1902	/*
1903	 * If the panel wasn't on, delay before accessing aux channel
1904	 */
1905	if (!edp_have_panel_power(intel_dp)) {
1906		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1907			      port_name(intel_dig_port->port));
1908		msleep(intel_dp->panel_power_up_delay);
1909	}
1910
1911	return need_to_disable;
1912}
1913
1914/*
1915 * Must be paired with intel_edp_panel_vdd_off() or
1916 * intel_edp_panel_off().
1917 * Nested calls to these functions are not allowed since
1918 * we drop the lock. Caller must use some higher level
1919 * locking to prevent nested calls from other threads.
1920 */
1921void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1922{
1923	bool vdd;
1924
1925	if (!is_edp(intel_dp))
1926		return;
1927
1928	pps_lock(intel_dp);
1929	vdd = edp_panel_vdd_on(intel_dp);
1930	pps_unlock(intel_dp);
1931
1932	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1933	     port_name(dp_to_dig_port(intel_dp)->port));
1934}
1935
/* Actually turn the forced VDD off and drop the AUX power domain
 * reference taken by edp_panel_vdd_on(). Caller must hold pps_mutex
 * and must have cleared want_panel_vdd first. */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Record when the panel lost power so the next power-on can
	 * honour the required power-cycle delay. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
1976
/* Deferred-work handler: turn VDD off unless someone re-requested it
 * in the meantime. */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1987
/* Schedule the deferred VDD-off work instead of dropping VDD now. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2000
2001/*
2002 * Must be paired with edp_panel_vdd_on().
2003 * Must hold pps_mutex around the whole on/off sequence.
2004 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2005 */
2006static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2007{
2008	struct drm_i915_private *dev_priv =
2009		intel_dp_to_dev(intel_dp)->dev_private;
2010
2011	lockdep_assert_held(&dev_priv->pps_mutex);
2012
2013	if (!is_edp(intel_dp))
2014		return;
2015
2016	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2017	     port_name(dp_to_dig_port(intel_dp)->port));
2018
2019	intel_dp->want_panel_vdd = false;
2020
2021	if (sync)
2022		edp_panel_vdd_off_sync(intel_dp);
2023	else
2024		edp_panel_vdd_schedule_off(intel_dp);
2025}
2026
/*
 * Turn eDP panel power on and wait until the panel reports up.
 * WARNs and bails if panel power is already on. Caller must hold
 * pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Honour the panel's minimum power-off to power-on cycle delay. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/*
	 * Timestamp for the power-on to backlight-on delay (presumably
	 * consumed by wait_backlight_on() -- verify).
	 */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2074
/*
 * Public wrapper: turn eDP panel power on under the PPS lock.
 * No-op for non-eDP ports.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2084
2085
/*
 * Turn eDP panel power off and wait for the panel to report down.
 * Requires VDD to still be requested (want_panel_vdd), because the
 * sequence clears POWER_TARGET_ON and EDP_FORCE_VDD in one write and
 * then drops the AUX power domain reference that was taken with VDD.
 * Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is being dropped as part of this write, bookkeeping first. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record the off time for the panel power cycle delay, then wait. */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2127
/*
 * Public wrapper: turn eDP panel power off under the PPS lock.
 * No-op for non-eDP ports.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2137
2138/* Enable backlight in the panel power control. */
2139static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2140{
2141	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2142	struct drm_device *dev = intel_dig_port->base.base.dev;
2143	struct drm_i915_private *dev_priv = dev->dev_private;
2144	u32 pp;
2145	i915_reg_t pp_ctrl_reg;
2146
2147	/*
2148	 * If we enable the backlight right away following a panel power
2149	 * on, we may see slight flicker as the panel syncs with the eDP
2150	 * link.  So delay a bit to make sure the image is solid before
2151	 * allowing it to appear.
2152	 */
2153	wait_backlight_on(intel_dp);
2154
2155	pps_lock(intel_dp);
2156
2157	pp = ironlake_get_pp_control(intel_dp);
2158	pp |= EDP_BLC_ENABLE;
2159
2160	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2161
2162	I915_WRITE(pp_ctrl_reg, pp);
2163	POSTING_READ(pp_ctrl_reg);
2164
2165	pps_unlock(intel_dp);
2166}
2167
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control backlight enable bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2179
2180/* Disable backlight in the panel power control. */
2181static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2182{
2183	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2184	struct drm_i915_private *dev_priv = dev->dev_private;
2185	u32 pp;
2186	i915_reg_t pp_ctrl_reg;
2187
2188	if (!is_edp(intel_dp))
2189		return;
2190
2191	pps_lock(intel_dp);
2192
2193	pp = ironlake_get_pp_control(intel_dp);
2194	pp &= ~EDP_BLC_ENABLE;
2195
2196	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2197
2198	I915_WRITE(pp_ctrl_reg, pp);
2199	POSTING_READ(pp_ctrl_reg);
2200
2201	pps_unlock(intel_dp);
2202
2203	intel_dp->last_backlight_off = jiffies;
2204	edp_wait_backlight_off(intel_dp);
2205}
2206
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of enable: PP control bit first, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2218
2219/*
2220 * Hook for controlling the panel power control backlight through the bl_power
2221 * sysfs attribute. Take care to handle multiple calls.
2222 */
2223static void intel_edp_backlight_power(struct intel_connector *connector,
2224				      bool enable)
2225{
2226	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2227	bool is_enabled;
2228
2229	pps_lock(intel_dp);
2230	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2231	pps_unlock(intel_dp);
2232
2233	if (is_enabled == enable)
2234		return;
2235
2236	DRM_DEBUG_KMS("panel power control backlight %s\n",
2237		      enable ? "enable" : "disable");
2238
2239	if (enable)
2240		_intel_edp_backlight_on(intel_dp);
2241	else
2242		_intel_edp_backlight_off(intel_dp);
2243}
2244
2245static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2246{
2247	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2248	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2249	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2250
2251	I915_STATE_WARN(cur_state != state,
2252			"DP port %c state assertion failure (expected %s, current %s)\n",
2253			port_name(dig_port->port),
2254			onoff(state), onoff(cur_state));
2255}
2256#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2257
2258static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2259{
2260	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2261
2262	I915_STATE_WARN(cur_state != state,
2263			"eDP PLL state assertion failure (expected %s, current %s)\n",
2264			onoff(state), onoff(cur_state));
2265}
2266#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2267#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2268
/*
 * Enable the ILK CPU eDP PLL. Pipe, DP port and PLL must all be
 * disabled on entry; the PLL frequency is picked from the crtc's
 * port_clock (only 162 MHz and 270 MHz link rates are handled).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	/* Program the frequency first, with the PLL still disabled. */
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/* Then set the enable bit with a second write and let it settle. */
	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2299
/*
 * Disable the ILK CPU eDP PLL. Pipe and DP port must already be
 * disabled, and the PLL must currently be enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2318
2319/* If the sink supports it, try to set the power state appropriately */
2320void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2321{
2322	int ret, i;
2323
2324	/* Should have a valid DPCD by this point */
2325	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2326		return;
2327
2328	if (mode != DRM_MODE_DPMS_ON) {
2329		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2330					 DP_SET_POWER_D3);
2331	} else {
2332		/*
2333		 * When turning on, we need to retry for 1ms to give the sink
2334		 * time to wake up.
2335		 */
2336		for (i = 0; i < 3; i++) {
2337			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2338						 DP_SET_POWER_D0);
2339			if (ret == 1)
2340				break;
2341			msleep(1);
2342		}
2343	}
2344
2345	if (ret != 1)
2346		DRM_DEBUG_KMS("failed to %s sink power state\n",
2347			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2348}
2349
/*
 * Read back whether this DP port is enabled in hardware and, if so,
 * which pipe it is driving (returned via *pipe). Returns false without
 * touching *pipe if the port's power domain is off or the port is
 * disabled. Temporarily holds a display power reference.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP uses CPT-style pipe select bits. */
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/*
		 * On CPT the pipe<->port routing lives in the transcoder's
		 * TRANS_DP_CTL rather than in the port register, so scan
		 * all pipes for the one selecting this port.
		 */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	/* Balance the power reference taken at the top. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2402
/*
 * Read the current DP hardware state back into pipe_config: sync
 * polarity flags, color range, lane count, M/N values, port clock and
 * the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT (except port A) the sync polarity lives in TRANS_DP_CTL. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A's link rate comes from the eDP PLL frequency select bit. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2490
/*
 * Encoder disable hook: tear down audio/PSR, then power the eDP panel
 * down (with VDD held across the sequence), and on pre-ILK hardware
 * take the link down before the pipe is disabled.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Stop audio before the port stops feeding the transcoder. */
	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	/* Non-DDI platforms disable PSR here; DDI handles it elsewhere. */
	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2514
2515static void ilk_post_disable_dp(struct intel_encoder *encoder)
 
 
2516{
2517	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2518	enum port port = dp_to_dig_port(intel_dp)->port;
2519
2520	intel_dp_link_down(intel_dp);
2521
2522	/* Only ilk+ has port A */
2523	if (port == PORT_A)
2524		ironlake_edp_pll_off(intel_dp);
2525}
2526
2527static void vlv_post_disable_dp(struct intel_encoder *encoder)
 
 
2528{
2529	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2530
2531	intel_dp_link_down(intel_dp);
2532}
2533
/*
 * Assert (reset=true) or deassert (reset=false) the CHV PHY data lane
 * and PCS clock soft resets for this encoder's DPIO channel. Lanes 2/3
 * (the PCS23 group) are only touched when more than two lanes are in
 * use. Callers in this file hold sb_lock around the DPIO accesses --
 * presumably required here too; verify.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* TX lane resets for lanes 0/1. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	/* Same for lanes 2/3 when in use. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* PCS clock soft reset, lanes 0/1. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	/* PCS clock soft reset, lanes 2/3 when in use. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
2577
2578static void chv_post_disable_dp(struct intel_encoder *encoder)
2579{
2580	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2581	struct drm_device *dev = encoder->base.dev;
2582	struct drm_i915_private *dev_priv = dev->dev_private;
2583
2584	intel_dp_link_down(intel_dp);
2585
2586	mutex_lock(&dev_priv->sb_lock);
2587
2588	/* Assert data lane reset */
2589	chv_data_lane_soft_reset(encoder, true);
2590
2591	mutex_unlock(&dev_priv->sb_lock);
2592}
2593
/*
 * Translate a DP training pattern request (dp_train_pat, possibly OR'ed
 * with DP_LINK_SCRAMBLING_DISABLE) into platform-specific register bits.
 * On DDI platforms this writes DP_TP_CTL directly; on the others it only
 * updates *DP and the caller is responsible for writing the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		/* DDI: training pattern lives in DP_TP_CTL. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style encodings (also IVB CPU eDP). No pattern 3. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Fall back to pattern 2 after complaining. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* Legacy encodings; CHV additionally supports pattern 3. */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2677
/*
 * Enable the DP port with training pattern 1 selected, using the
 * two-write sequence VLV/CHV require (register set up first, port
 * enable in a second write).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2705
/*
 * Common DP enable path: bring up the port (with PPS/underrun
 * workarounds), power the eDP panel, wait for the PHY on VLV/CHV,
 * wake the sink, train the link, and finally enable audio.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	/* The port must not already be enabled. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV/CHV must claim a power sequencer for this pipe first. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	/*
	 * We get an occasional spurious underrun between the port
	 * enable and vdd enable, when enabling port A eDP.
	 *
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
	 */
	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	/* Power the panel up, holding VDD only for the duration. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		/* On CHV only wait for the lanes that are actually used. */
		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	/* Wake the sink, then run link training to completion. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
2775
2776static void g4x_enable_dp(struct intel_encoder *encoder)
 
 
2777{
2778	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2779
2780	intel_enable_dp(encoder);
2781	intel_edp_backlight_on(intel_dp);
2782}
2783
2784static void vlv_enable_dp(struct intel_encoder *encoder)
 
 
2785{
2786	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2787
2788	intel_edp_backlight_on(intel_dp);
2789	intel_psr_enable(intel_dp);
2790}
2791
/*
 * g4x/ILK pre-enable hook: program the port registers and, for CPU eDP
 * (port A), enable the eDP PLL -- with an ILK FIFO underrun workaround
 * around the PLL bring-up.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
2818
/*
 * Logically disconnect this port from the power sequencer it is
 * currently using: drop VDD synchronously, clear the sequencer's port
 * select, and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* Make sure VDD is off before we lose control of the sequencer. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2844
/*
 * Detach the given pipe's power sequencer from whichever eDP port is
 * currently using it, so the caller can claim it. Caller must hold
 * pps_mutex. WARNs if the sequencer is stolen from an active port.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only pipes A and B have power sequencers on VLV/CHV. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2880
/*
 * Bind the power sequencer of this port's current pipe to the port:
 * release any sequencer the port held before, steal the target pipe's
 * sequencer from any other port, then (re)initialize it. eDP only;
 * caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Already using the right pipe's sequencer -- nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2921
/*
 * VLV pre-enable hook: program the PHY's PCS registers via sideband,
 * then run the common DP enable path.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	/*
	 * NOTE(review): the value read above is discarded by the
	 * "val = 0" below, which also makes the following &= ~(1<<21)
	 * a no-op. Confirm whether the read is needed only for its
	 * side effects, or whether the result was meant to be kept.
	 */
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2950
/*
 * VLV pre-PLL-enable hook: program the port registers, then put the
 * PHY's TX lanes and PCS clock into their default reset state via
 * sideband writes.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2980
/*
 * CHV pre-enable hook for DP: program the PHY lane latency/stagger
 * settings over sideband, release the data lane soft reset, and then
 * enable the DP port.  All DPIO accesses are under sb_lock.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	/* PCS23 only carries lanes 2/3, so skip it for 1- and 2-lane links */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting*/
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming: value scales with port clock */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
3069
/*
 * CHV pre-PLL-enable hook for DP: power up the required PHY lanes,
 * assert data lane reset, and program left/right clock distribution
 * and clock channel usage for the chosen channel, all before the PLL
 * is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	/* PCS23 only matters when lanes 2/3 are in use */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This a a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3152
/*
 * CHV post-PLL-disable hook for DP: undo the clock distribution forcing
 * done in chv_dp_pre_pll_enable() and drop the lane power overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3185
3186/*
3187 * Native read with retry for link status and receiver capability reads for
3188 * cases where the sink may still be asleep.
3189 *
3190 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3191 * supposed to retry 3 times per the spec.
3192 */
3193static ssize_t
3194intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3195			void *buffer, size_t size)
3196{
3197	ssize_t ret;
3198	int i;
3199
3200	/*
3201	 * Sometime we just get the same incorrect byte repeated
3202	 * over the entire buffer. Doing just one throw away read
3203	 * initially seems to "solve" it.
3204	 */
3205	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3206
3207	for (i = 0; i < 3; i++) {
3208		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3209		if (ret == size)
3210			return ret;
3211		msleep(1);
3212	}
3213
3214	return ret;
3215}
3216
3217/*
3218 * Fetch AUX CH registers 0x202 - 0x207 which contain
3219 * link status information
3220 */
3221bool
3222intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3223{
3224	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3225				       DP_LANE0_1_STATUS,
3226				       link_status,
3227				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3228}
3229
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level the source hardware supports
 * for this port, encoded as a DP_TRAIN_VOLTAGE_SWING_LEVEL_* value.
 * The limit varies per platform generation and, on some platforms,
 * per port (eDP on PORT_A is treated specially).
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* low-vswing eDP panels on PORT_A may use the full range */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3253
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage swing, encoded as a DP_TRAIN_PRE_EMPH_LEVEL_* value.
 * Higher swings allow less pre-emphasis; the exact table is
 * platform-specific.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3321
/*
 * Translate the DP training voltage-swing/pre-emphasis request into
 * VLV PHY register values and write them out over sideband DPIO.
 * Returns 0: on VLV the signal levels live entirely in the PHY, so no
 * bits need to be merged into the DP port register.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/*
	 * Look up the magic register values for the requested
	 * pre-emphasis/swing combination; invalid combinations
	 * (beyond what the hardware supports) bail out with 0.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Push the values into the PHY under the sideband lock */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3421
3422static bool chv_need_uniq_trans_scale(uint8_t train_set)
3423{
3424	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3425		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3426}
3427
/*
 * Translate the DP training voltage-swing/pre-emphasis request into
 * CHV PHY de-emphasis/margin values, program them per lane over
 * sideband DPIO, and kick off the swing calculation.  Returns 0: the
 * signal levels live entirely in the PHY, so no DP port register bits
 * are needed.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up de-emphasis/margin for the requested combination */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* PCS23 only carries lanes 2/3 */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3595
3596static uint32_t
3597gen4_signal_levels(uint8_t train_set)
3598{
3599	uint32_t	signal_levels = 0;
3600
3601	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3602	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3603	default:
3604		signal_levels |= DP_VOLTAGE_0_4;
3605		break;
3606	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3607		signal_levels |= DP_VOLTAGE_0_6;
3608		break;
3609	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3610		signal_levels |= DP_VOLTAGE_0_8;
3611		break;
3612	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3613		signal_levels |= DP_VOLTAGE_1_2;
3614		break;
3615	}
3616	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3617	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3618	default:
3619		signal_levels |= DP_PRE_EMPHASIS_0;
3620		break;
3621	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3622		signal_levels |= DP_PRE_EMPHASIS_3_5;
3623		break;
3624	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3625		signal_levels |= DP_PRE_EMPHASIS_6;
3626		break;
3627	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3628		signal_levels |= DP_PRE_EMPHASIS_9_5;
3629		break;
3630	}
3631	return signal_levels;
3632}
3633
/* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map a DP training voltage-swing/pre-emphasis combination to the SNB
 * eDP register encoding.  Unsupported combinations log a debug message
 * and fall back to the lowest setting.
 */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3661
/* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Map a DP training voltage-swing/pre-emphasis combination to the IVB
 * eDP register encoding.  Unsupported combinations log a debug message
 * and fall back to the 500mV/0dB setting.
 */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3692
/*
 * Apply the current training signal levels (intel_dp->train_set[0]) to
 * the hardware.  On DDI/CHV/VLV platforms the levels are programmed via
 * platform hooks (mask stays 0 where no port-register bits change); on
 * older platforms the computed bits are merged into intel_dp->DP and
 * written to the port register.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* BXT programs levels entirely in the PHY; no DP bits */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3739
/*
 * Update intel_dp->DP with the requested link training pattern (via the
 * platform-specific _intel_dp_set_link_train helper) and write the
 * result to the port register.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3753
/*
 * Switch a DDI DP port to idle link-training pattern transmission and,
 * except on PORT_A, wait for the hardware to report idle-pattern
 * completion.  No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3784
/*
 * Take the DP link down: switch the port to the idle training pattern,
 * then disable it.  Includes the IBX transcoder-A workaround, whose
 * exact register write ordering must be preserved.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms use a different teardown path entirely */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Put the port into the idle training pattern first */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3853
/*
 * Read and cache the sink's DPCD receiver capabilities, probe PSR/PSR2
 * support, eDP intermediate link rates, and downstream port info.
 * Returns false on AUX failure or if no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 requires gen9+ source and sink-side AUX frame sync */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table is zero-terminated */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3943
3944static void
3945intel_dp_probe_oui(struct intel_dp *intel_dp)
3946{
3947	u8 buf[3];
3948
3949	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3950		return;
3951
3952	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3953		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3954			      buf[0], buf[1], buf[2]);
 
 
3955
3956	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3957		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3958			      buf[0], buf[1], buf[2]);
 
3959}
3960
3961static bool
3962intel_dp_probe_mst(struct intel_dp *intel_dp)
3963{
3964	u8 buf[1];
 
3965
3966	if (!intel_dp->can_mst)
3967		return false;
3968
3969	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3970		return false;
3971
3972	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3973		if (buf[0] & DP_MST_CAP) {
3974			DRM_DEBUG_KMS("Sink is MST capable\n");
3975			intel_dp->is_mst = true;
3976		} else {
3977			DRM_DEBUG_KMS("Sink is not MST capable\n");
3978			intel_dp->is_mst = false;
3979		}
3980	}
3981
3982	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3983	return intel_dp->is_mst;
3984}
3985
/*
 * intel_dp_sink_crc_stop() - stop sink CRC calculation and drain the count.
 *
 * Clears DP_TEST_SINK_START in the sink's DP_TEST_SINK register, then polls
 * DP_TEST_SINK_MISC once per vblank (up to 10 attempts) until the CRC count
 * reads back as zero.  Always re-enables IPS on exit, pairing with the
 * hsw_disable_ips() in intel_dp_sink_crc_start().
 *
 * Returns 0 on success, -EIO on an AUX failure, -ETIMEDOUT if the counter
 * never drains.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	/* Read-modify-write DP_TEST_SINK to clear only the START bit. */
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait for TEST_CRC_COUNT in DP_TEST_SINK_MISC to reach zero. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	/* Undo the IPS disable done when CRC capture was started. */
	hsw_enable_ips(intel_crtc);
	return ret;
}
4029
/*
 * intel_dp_sink_crc_start() - enable sink CRC calculation.
 *
 * Verifies the sink advertises DP_TEST_CRC_SUPPORTED, stops any calculation
 * already in flight, disables IPS for the duration of the capture
 * (re-enabled by intel_dp_sink_crc_stop()), sets DP_TEST_SINK_START and
 * waits one vblank for it to take effect.
 *
 * Returns 0 on success, -EIO on AUX failure, -ENOTTY when the sink has no
 * CRC support.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous capture may still be running; stop it first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Keep IPS state balanced on the failure path. */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
4064
/*
 * intel_dp_sink_crc() - capture one frame CRC from the sink.
 * @intel_dp: port to capture from
 * @crc: output buffer for the 6 CRC bytes read from DP_TEST_CRC_R_CR
 *
 * Starts sink CRC calculation, waits up to 6 vblanks for the sink's
 * TEST_CRC_COUNT to become non-zero, reads the CRC bytes, then stops the
 * calculation again.  Returns 0 on success or a negative error code.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a computed CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Best-effort stop; the capture result (ret) takes precedence. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4105
4106static bool
4107intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4108{
4109	return intel_dp_dpcd_read_wake(&intel_dp->aux,
4110				       DP_DEVICE_SERVICE_IRQ_VECTOR,
4111				       sink_irq_vector, 1) == 1;
4112}
4113
4114static bool
4115intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4116{
4117	int ret;
4118
4119	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4120					     DP_SINK_COUNT_ESI,
4121					     sink_irq_vector, 14);
4122	if (ret != 14)
4123		return false;
4124
4125	return true;
4126}
4127
4128static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4129{
4130	uint8_t test_result = DP_TEST_ACK;
4131	return test_result;
4132}
4133
4134static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4135{
4136	uint8_t test_result = DP_TEST_NAK;
4137	return test_result;
4138}
4139
4140static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4141{
4142	uint8_t test_result = DP_TEST_NAK;
4143	struct intel_connector *intel_connector = intel_dp->attached_connector;
4144	struct drm_connector *connector = &intel_connector->base;
4145
4146	if (intel_connector->detect_edid == NULL ||
4147	    connector->edid_corrupt ||
4148	    intel_dp->aux.i2c_defer_count > 6) {
4149		/* Check EDID read for NACKs, DEFERs and corruption
4150		 * (DP CTS 1.2 Core r1.1)
4151		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4152		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4153		 *    4.2.2.6 : EDID corruption detected
4154		 * Use failsafe mode for all cases
4155		 */
4156		if (intel_dp->aux.i2c_nack_count > 0 ||
4157			intel_dp->aux.i2c_defer_count > 0)
4158			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4159				      intel_dp->aux.i2c_nack_count,
4160				      intel_dp->aux.i2c_defer_count);
4161		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4162	} else {
4163		struct edid *block = intel_connector->detect_edid;
4164
4165		/* We have to write the checksum
4166		 * of the last block read
4167		 */
4168		block += intel_connector->detect_edid->extensions;
4169
4170		if (!drm_dp_dpcd_write(&intel_dp->aux,
4171					DP_TEST_EDID_CHECKSUM,
4172					&block->checksum,
4173					1))
4174			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4175
4176		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4177		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4178	}
4179
4180	/* Set test active flag here so userspace doesn't interrupt things */
4181	intel_dp->compliance_test_active = 1;
4182
4183	return test_result;
4184}
4185
4186static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4187{
4188	uint8_t test_result = DP_TEST_NAK;
4189	return test_result;
4190}
4191
4192static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4193{
4194	uint8_t response = DP_TEST_NAK;
4195	uint8_t rxdata = 0;
4196	int status = 0;
4197
4198	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4199	if (status <= 0) {
4200		DRM_DEBUG_KMS("Could not read test request from sink\n");
4201		goto update_status;
4202	}
4203
4204	switch (rxdata) {
4205	case DP_TEST_LINK_TRAINING:
4206		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4207		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4208		response = intel_dp_autotest_link_training(intel_dp);
4209		break;
4210	case DP_TEST_LINK_VIDEO_PATTERN:
4211		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4212		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4213		response = intel_dp_autotest_video_pattern(intel_dp);
4214		break;
4215	case DP_TEST_LINK_EDID_READ:
4216		DRM_DEBUG_KMS("EDID test requested\n");
4217		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4218		response = intel_dp_autotest_edid(intel_dp);
4219		break;
4220	case DP_TEST_LINK_PHY_TEST_PATTERN:
4221		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4222		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4223		response = intel_dp_autotest_phy_pattern(intel_dp);
4224		break;
4225	default:
4226		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4227		break;
4228	}
4229
4230update_status:
4231	status = drm_dp_dpcd_write(&intel_dp->aux,
4232				   DP_TEST_RESPONSE,
4233				   &response, 1);
4234	if (status <= 0)
4235		DRM_DEBUG_KMS("Could not write test response to sink\n");
4236}
4237
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain the
 * link if channel EQ was lost, hand the ESI to the topology manager, ack
 * handled events back to the sink, and loop while the sink reports more.
 *
 * Returns the topology manager's result, 0 when nothing was handled, or
 * -EINVAL when not in MST mode or the ESI read failed (in which case MST
 * is torn down and a hotplug event is broadcast).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the 3-byte
				 * write up to 3 times on short writes. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Re-read the ESI: the sink may have queued
				 * more events while we were servicing. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4294
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4295/*
4296 * According to DP spec
4297 * 5.1.2:
4298 *  1. Read DPCD
4299 *  2. Configure link according to Receiver Capabilities
4300 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4301 *  4. Check link status on receipt of hot-plug interrupt
 
 
 
 
 
4302 */
/*
 * Check the link on a short HPD pulse: re-read DPCD and link status, ack
 * any pending sink IRQs, and retrain when channel EQ has been lost or the
 * sink requested a LINK_TRAINING compliance test.
 *
 * Caller must hold connection_mutex (asserted below).
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Nothing to do unless the encoder is on an active CRTC. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Short-pulse test requests are only logged, not serviced. */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4360
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the sink's DPCD: a native sink counts as
 * connected; for branch devices, consult SINK_COUNT (when HPD-aware) or
 * probe DDC, and report "unknown" for port types that can't be probed
 * reliably (VGA / non-EDID).
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD 1.1+: per-port downstream type info is available. */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse type in DOWNSTREAMPORT_PRESENT. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4410
4411static enum drm_connector_status
4412edp_detect(struct intel_dp *intel_dp)
4413{
4414	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4415	enum drm_connector_status status;
4416
4417	status = intel_panel_detect(dev);
4418	if (status == connector_status_unknown)
4419		status = connector_status_connected;
4420
4421	return status;
4422}
4423
4424static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4425				       struct intel_digital_port *port)
4426{
4427	u32 bit;
4428
4429	switch (port->port) {
4430	case PORT_A:
4431		return true;
4432	case PORT_B:
4433		bit = SDE_PORTB_HOTPLUG;
4434		break;
4435	case PORT_C:
4436		bit = SDE_PORTC_HOTPLUG;
4437		break;
4438	case PORT_D:
4439		bit = SDE_PORTD_HOTPLUG;
4440		break;
4441	default:
4442		MISSING_CASE(port->port);
4443		return false;
4444	}
4445
4446	return I915_READ(SDEISR) & bit;
4447}
4448
4449static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4450				       struct intel_digital_port *port)
4451{
4452	u32 bit;
4453
4454	switch (port->port) {
4455	case PORT_A:
4456		return true;
4457	case PORT_B:
4458		bit = SDE_PORTB_HOTPLUG_CPT;
4459		break;
4460	case PORT_C:
4461		bit = SDE_PORTC_HOTPLUG_CPT;
4462		break;
4463	case PORT_D:
4464		bit = SDE_PORTD_HOTPLUG_CPT;
4465		break;
4466	case PORT_E:
4467		bit = SDE_PORTE_HOTPLUG_SPT;
4468		break;
4469	default:
4470		MISSING_CASE(port->port);
4471		return false;
4472	}
4473
4474	return I915_READ(SDEISR) & bit;
4475}
4476
4477static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4478				       struct intel_digital_port *port)
4479{
4480	u32 bit;
4481
4482	switch (port->port) {
4483	case PORT_B:
4484		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4485		break;
4486	case PORT_C:
4487		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4488		break;
4489	case PORT_D:
4490		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4491		break;
4492	default:
4493		MISSING_CASE(port->port);
4494		return false;
4495	}
4496
4497	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4498}
4499
4500static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4501					struct intel_digital_port *port)
4502{
4503	u32 bit;
4504
4505	switch (port->port) {
4506	case PORT_B:
4507		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4508		break;
4509	case PORT_C:
4510		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4511		break;
4512	case PORT_D:
4513		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4514		break;
4515	default:
4516		MISSING_CASE(port->port);
4517		return false;
4518	}
4519
4520	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4521}
4522
4523static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4524				       struct intel_digital_port *intel_dig_port)
4525{
4526	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4527	enum port port;
4528	u32 bit;
4529
4530	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4531	switch (port) {
4532	case PORT_A:
4533		bit = BXT_DE_PORT_HP_DDIA;
4534		break;
4535	case PORT_B:
4536		bit = BXT_DE_PORT_HP_DDIB;
4537		break;
4538	case PORT_C:
4539		bit = BXT_DE_PORT_HP_DDIC;
4540		break;
4541	default:
4542		MISSING_CASE(port);
4543		return false;
4544	}
4545
4546	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4547}
4548
4549/*
4550 * intel_digital_port_connected - is the specified port connected?
4551 * @dev_priv: i915 private structure
4552 * @port: the port to test
4553 *
4554 * Return %true if @port is connected, %false otherwise.
4555 */
4556bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4557					 struct intel_digital_port *port)
4558{
4559	if (HAS_PCH_IBX(dev_priv))
4560		return ibx_digital_port_connected(dev_priv, port);
4561	else if (HAS_PCH_SPLIT(dev_priv))
4562		return cpt_digital_port_connected(dev_priv, port);
4563	else if (IS_BROXTON(dev_priv))
4564		return bxt_digital_port_connected(dev_priv, port);
4565	else if (IS_GM45(dev_priv))
4566		return gm45_digital_port_connected(dev_priv, port);
4567	else
4568		return g4x_digital_port_connected(dev_priv, port);
4569}
4570
4571static struct edid *
4572intel_dp_get_edid(struct intel_dp *intel_dp)
4573{
4574	struct intel_connector *intel_connector = intel_dp->attached_connector;
4575
4576	/* use cached edid if we have one */
4577	if (intel_connector->edid) {
4578		/* invalid edid */
4579		if (IS_ERR(intel_connector->edid))
4580			return NULL;
4581
4582		return drm_edid_duplicate(intel_connector->edid);
4583	} else
4584		return drm_get_edid(&intel_connector->base,
4585				    &intel_dp->aux.ddc);
4586}
4587
4588static void
4589intel_dp_set_edid(struct intel_dp *intel_dp)
4590{
4591	struct intel_connector *intel_connector = intel_dp->attached_connector;
4592	struct edid *edid;
4593
 
4594	edid = intel_dp_get_edid(intel_dp);
4595	intel_connector->detect_edid = edid;
4596
4597	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4598		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4599	else
4600		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4601}
4602
4603static void
4604intel_dp_unset_edid(struct intel_dp *intel_dp)
4605{
4606	struct intel_connector *intel_connector = intel_dp->attached_connector;
4607
4608	kfree(intel_connector->detect_edid);
4609	intel_connector->detect_edid = NULL;
4610
4611	intel_dp->has_audio = false;
4612}
4613
/*
 * Connector .detect hook for DP/eDP.
 *
 * Grabs the AUX power domain for the duration, determines connectivity
 * (eDP via edp_detect(), DP via the live-status bit plus a DPCD probe),
 * probes OUI and MST, caches the EDID, and services any pending sink IRQ
 * (including compliance test requests).  MST-driven ports always report
 * disconnected here, since their monitors appear via the MST topology.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Power domain is held until the "out" label below. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Reset compliance state when the sink goes away. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4701
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Connector .force hook: re-read the EDID for a connector whose status is
 * being forced by the user, holding the AUX power domain across the read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4727
4728static int intel_dp_get_modes(struct drm_connector *connector)
4729{
4730	struct intel_connector *intel_connector = to_intel_connector(connector);
4731	struct edid *edid;
4732
4733	edid = intel_connector->detect_edid;
4734	if (edid) {
4735		int ret = intel_connector_update_modes(connector, edid);
4736		if (ret)
4737			return ret;
4738	}
4739
4740	/* if eDP has no EDID, fall back to fixed mode */
4741	if (is_edp(intel_attached_dp(connector)) &&
4742	    intel_connector->panel.fixed_mode) {
4743		struct drm_display_mode *mode;
4744
4745		mode = drm_mode_duplicate(connector->dev,
4746					  intel_connector->panel.fixed_mode);
4747		if (mode) {
4748			drm_mode_probed_add(connector, mode);
4749			return 1;
4750		}
4751	}
4752
4753	return 0;
4754}
4755
4756static bool
4757intel_dp_detect_audio(struct drm_connector *connector)
4758{
4759	bool has_audio = false;
4760	struct edid *edid;
4761
4762	edid = to_intel_connector(connector)->detect_edid;
4763	if (edid)
4764		has_audio = drm_detect_monitor_audio(edid);
4765
4766	return has_audio;
4767}
4768
/*
 * Connector .set_property hook: handles the force-audio, broadcast-RGB and
 * (eDP only) scaling-mode properties.  Properties that change the output
 * configuration trigger a modeset restore via the "done" label.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the property object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio support from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change — skip the modeset restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new configuration if the encoder is currently active. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Connector .destroy hook: free the detection and override EDIDs and the
 * eDP panel state, then tear down and free the DRM connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* EDID cached by the last detect cycle. */
	kfree(intel_connector->detect_edid);

	/* Override EDID; the field may hold an ERR_PTR, hence the check. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4875
/*
 * Encoder .destroy hook: tear down MST state, force panel VDD off under
 * the pps lock for eDP, unregister the reboot notifier, and free the
 * digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4900
/*
 * Suspend hook for DP encoders: for eDP, cancel the delayed VDD-off work
 * and synchronously turn panel VDD off under the pps lock.  No-op for
 * non-eDP ports.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4917
/*
 * Bring the driver's VDD tracking in sync with hardware state left by the
 * BIOS: if panel VDD is already on, take the matching AUX power-domain
 * reference and schedule the normal delayed VDD off.
 *
 * Caller must hold pps_mutex (asserted below).
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4942
/*
 * Encoder ->reset hook: resynchronize power sequencer state with the
 * hardware (e.g. after resume, when the BIOS may have touched it).
 * Only eDP encoders need this.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4965
/* DP connector vfuncs; atomic state handling uses the generic helpers. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4977
/* Probe helpers: mode enumeration, mode validation, encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4983
/* Encoder lifetime hooks (reset on resume, destroy on teardown). */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4988
4989enum irqreturn
4990intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4991{
4992	struct intel_dp *intel_dp = &intel_dig_port->dp;
4993	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4994	struct drm_device *dev = intel_dig_port->base.base.dev;
4995	struct drm_i915_private *dev_priv = dev->dev_private;
4996	enum intel_display_power_domain power_domain;
4997	enum irqreturn ret = IRQ_NONE;
4998
4999	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5000	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5001		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5002
5003	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5004		/*
5005		 * vdd off can generate a long pulse on eDP which
5006		 * would require vdd on to handle it, and thus we
5007		 * would end up in an endless cycle of
5008		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5009		 */
5010		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5011			      port_name(intel_dig_port->port));
5012		return IRQ_HANDLED;
5013	}
5014
5015	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5016		      port_name(intel_dig_port->port),
5017		      long_hpd ? "long" : "short");
5018
5019	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5020	intel_display_power_get(dev_priv, power_domain);
5021
5022	if (long_hpd) {
5023		/* indicate that we need to restart link training */
5024		intel_dp->train_set_valid = false;
5025
5026		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5027			goto mst_fail;
5028
5029		if (!intel_dp_get_dpcd(intel_dp)) {
5030			goto mst_fail;
5031		}
5032
5033		intel_dp_probe_oui(intel_dp);
 
5034
5035		if (!intel_dp_probe_mst(intel_dp)) {
5036			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5037			intel_dp_check_link_status(intel_dp);
5038			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5039			goto mst_fail;
5040		}
5041	} else {
5042		if (intel_dp->is_mst) {
5043			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5044				goto mst_fail;
 
 
 
5045		}
 
5046
5047		if (!intel_dp->is_mst) {
5048			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5049			intel_dp_check_link_status(intel_dp);
5050			drm_modeset_unlock(&dev->mode_config.connection_mutex);
5051		}
5052	}
5053
5054	ret = IRQ_HANDLED;
5055
5056	goto put_power;
5057mst_fail:
5058	/* if we were in MST mode, and device is not there get out of MST mode */
5059	if (intel_dp->is_mst) {
5060		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5061		intel_dp->is_mst = false;
5062		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5063	}
5064put_power:
5065	intel_display_power_put(dev_priv, power_domain);
5066
5067	return ret;
5068}
5069
5070/* check the VBT to see whether the eDP is on another port */
5071bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5072{
5073	struct drm_i915_private *dev_priv = dev->dev_private;
5074	union child_device_config *p_child;
5075	int i;
5076	static const short port_mapping[] = {
5077		[PORT_B] = DVO_PORT_DPB,
5078		[PORT_C] = DVO_PORT_DPC,
5079		[PORT_D] = DVO_PORT_DPD,
5080		[PORT_E] = DVO_PORT_DPE,
5081	};
5082
5083	/*
5084	 * eDP not supported on g4x. so bail out early just
5085	 * for a bit extra safety in case the VBT is bonkers.
5086	 */
5087	if (INTEL_INFO(dev)->gen < 5)
5088		return false;
5089
5090	if (port == PORT_A)
5091		return true;
5092
5093	if (!dev_priv->vbt.child_dev_num)
5094		return false;
5095
5096	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5097		p_child = dev_priv->vbt.child_dev + i;
5098
5099		if (p_child->common.dvo_port == port_mapping[port] &&
5100		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5101		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5102			return true;
5103	}
5104	return false;
5105}
5106
/*
 * Attach the standard DP connector properties (force-audio, broadcast
 * RGB) and, for eDP, the panel scaling-mode property defaulting to
 * aspect-ratio scaling.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5125
/*
 * Record "now" as the baseline for the panel power-off / power-on /
 * backlight-off timestamps used by the PPS delay bookkeeping.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5132
/*
 * Read the current panel power sequencing delays out of the hardware,
 * combine them with the VBT values and the eDP 1.3 spec limits, and
 * cache the result in intel_dp->pps_delays plus the per-delay fields
 * (panel_power_up_delay etc., in ms).  Runs once; subsequent calls are
 * no-ops.  Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		/* BXT has no divisor register; its power cycle delay is
		 * read from pp_ctl below. */
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT stores t11_t12 zero-based in 100ms units; convert
		 * to the common 100us units used everywhere else. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the 100us hw units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5255
/*
 * Program the panel power sequencer registers from the delays cached in
 * intel_dp->pps_delays, including the pp clock divisor (non-BXT) and,
 * on VLV/CHV and IBX/CPT PCHs, the port select bits.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power cycle delay in PP_CONTROL rather
		 * than in a separate divisor register. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5343
5344/**
5345 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5346 * @dev: DRM device
 
5347 * @refresh_rate: RR to be programmed
5348 *
5349 * This function gets called when refresh rate (RR) has to be changed from
5350 * one frequency to another. Switches can be between high and low RR
5351 * supported by the panel or to any other RR based on media playback (in
5352 * this case, RR value needs to be passed from user space).
5353 *
5354 * The caller of this function needs to take a lock on dev_priv->drrs.
5355 */
5356static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 
 
5357{
5358	struct drm_i915_private *dev_priv = dev->dev_private;
5359	struct intel_encoder *encoder;
5360	struct intel_digital_port *dig_port = NULL;
5361	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5362	struct intel_crtc_state *config = NULL;
5363	struct intel_crtc *intel_crtc = NULL;
5364	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5365
5366	if (refresh_rate <= 0) {
5367		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5368		return;
5369	}
5370
5371	if (intel_dp == NULL) {
5372		DRM_DEBUG_KMS("DRRS not supported.\n");
5373		return;
5374	}
5375
5376	/*
5377	 * FIXME: This needs proper synchronization with psr state for some
5378	 * platforms that cannot have PSR and DRRS enabled at the same time.
5379	 */
5380
5381	dig_port = dp_to_dig_port(intel_dp);
5382	encoder = &dig_port->base;
5383	intel_crtc = to_intel_crtc(encoder->base.crtc);
5384
5385	if (!intel_crtc) {
5386		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5387		return;
5388	}
5389
5390	config = intel_crtc->config;
5391
5392	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5393		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5394		return;
5395	}
5396
5397	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5398			refresh_rate)
5399		index = DRRS_LOW_RR;
5400
5401	if (index == dev_priv->drrs.refresh_rate_type) {
5402		DRM_DEBUG_KMS(
5403			"DRRS requested for previously set RR...ignoring\n");
5404		return;
5405	}
5406
5407	if (!intel_crtc->active) {
5408		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5409		return;
5410	}
5411
5412	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5413		switch (index) {
5414		case DRRS_HIGH_RR:
5415			intel_dp_set_m_n(intel_crtc, M1_N1);
5416			break;
5417		case DRRS_LOW_RR:
5418			intel_dp_set_m_n(intel_crtc, M2_N2);
5419			break;
5420		case DRRS_MAX_RR:
5421		default:
5422			DRM_ERROR("Unsupported refreshrate type\n");
5423		}
5424	} else if (INTEL_INFO(dev)->gen > 6) {
5425		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5426		u32 val;
5427
5428		val = I915_READ(reg);
5429		if (index > DRRS_HIGH_RR) {
5430			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5431				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5432			else
5433				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5434		} else {
5435			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5436				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5437			else
5438				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5439		}
5440		I915_WRITE(reg, val);
5441	}
5442
5443	dev_priv->drrs.refresh_rate_type = index;
5444
5445	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5446}
5447
5448/**
5449 * intel_edp_drrs_enable - init drrs struct if supported
5450 * @intel_dp: DP struct
 
5451 *
5452 * Initializes frontbuffer_bits and drrs.dp
5453 */
5454void intel_edp_drrs_enable(struct intel_dp *intel_dp)
 
5455{
5456	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5457	struct drm_i915_private *dev_priv = dev->dev_private;
5458	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5459	struct drm_crtc *crtc = dig_port->base.base.crtc;
5460	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5461
5462	if (!intel_crtc->config->has_drrs) {
5463		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5464		return;
5465	}
5466
5467	mutex_lock(&dev_priv->drrs.mutex);
5468	if (WARN_ON(dev_priv->drrs.dp)) {
5469		DRM_ERROR("DRRS already enabled\n");
5470		goto unlock;
5471	}
5472
5473	dev_priv->drrs.busy_frontbuffer_bits = 0;
5474
5475	dev_priv->drrs.dp = intel_dp;
5476
5477unlock:
5478	mutex_unlock(&dev_priv->drrs.mutex);
5479}
5480
5481/**
5482 * intel_edp_drrs_disable - Disable DRRS
5483 * @intel_dp: DP struct
 
5484 *
5485 */
5486void intel_edp_drrs_disable(struct intel_dp *intel_dp)
 
5487{
5488	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5489	struct drm_i915_private *dev_priv = dev->dev_private;
5490	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5491	struct drm_crtc *crtc = dig_port->base.base.crtc;
5492	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5493
5494	if (!intel_crtc->config->has_drrs)
5495		return;
5496
5497	mutex_lock(&dev_priv->drrs.mutex);
5498	if (!dev_priv->drrs.dp) {
5499		mutex_unlock(&dev_priv->drrs.mutex);
5500		return;
5501	}
5502
5503	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5504		intel_dp_set_drrs_state(dev_priv->dev,
5505			intel_dp->attached_connector->panel.
5506			fixed_mode->vrefresh);
5507
5508	dev_priv->drrs.dp = NULL;
5509	mutex_unlock(&dev_priv->drrs.mutex);
5510
5511	cancel_delayed_work_sync(&dev_priv->drrs.work);
5512}
5513
/*
 * Delayed work: switch to the panel's downclock (low) refresh rate
 * once the screen has stayed idle for the scheduled timeout.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5543
5544/**
5545 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5546 * @dev: DRM device
5547 * @frontbuffer_bits: frontbuffer plane tracking bits
5548 *
5549 * This function gets called everytime rendering on the given planes start.
5550 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5551 *
5552 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5553 */
5554void intel_edp_drrs_invalidate(struct drm_device *dev,
5555		unsigned frontbuffer_bits)
5556{
5557	struct drm_i915_private *dev_priv = dev->dev_private;
5558	struct drm_crtc *crtc;
5559	enum pipe pipe;
5560
5561	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5562		return;
5563
5564	cancel_delayed_work(&dev_priv->drrs.work);
5565
5566	mutex_lock(&dev_priv->drrs.mutex);
5567	if (!dev_priv->drrs.dp) {
5568		mutex_unlock(&dev_priv->drrs.mutex);
5569		return;
5570	}
5571
5572	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5573	pipe = to_intel_crtc(crtc)->pipe;
5574
5575	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5576	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5577
5578	/* invalidate means busy screen hence upclock */
5579	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5580		intel_dp_set_drrs_state(dev_priv->dev,
5581				dev_priv->drrs.dp->attached_connector->panel.
5582				fixed_mode->vrefresh);
5583
5584	mutex_unlock(&dev_priv->drrs.mutex);
5585}
5586
5587/**
5588 * intel_edp_drrs_flush - Restart Idleness DRRS
5589 * @dev: DRM device
5590 * @frontbuffer_bits: frontbuffer plane tracking bits
5591 *
5592 * This function gets called every time rendering on the given planes has
5593 * completed or flip on a crtc is completed. So DRRS should be upclocked
5594 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5595 * if no other planes are dirty.
5596 *
5597 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5598 */
5599void intel_edp_drrs_flush(struct drm_device *dev,
5600		unsigned frontbuffer_bits)
5601{
5602	struct drm_i915_private *dev_priv = dev->dev_private;
5603	struct drm_crtc *crtc;
5604	enum pipe pipe;
5605
5606	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5607		return;
5608
5609	cancel_delayed_work(&dev_priv->drrs.work);
5610
5611	mutex_lock(&dev_priv->drrs.mutex);
5612	if (!dev_priv->drrs.dp) {
5613		mutex_unlock(&dev_priv->drrs.mutex);
5614		return;
5615	}
5616
5617	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5618	pipe = to_intel_crtc(crtc)->pipe;
5619
5620	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5621	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5622
5623	/* flush means busy screen hence upclock */
5624	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5625		intel_dp_set_drrs_state(dev_priv->dev,
5626				dev_priv->drrs.dp->attached_connector->panel.
5627				fixed_mode->vrefresh);
5628
5629	/*
5630	 * flush also means no more activity hence schedule downclock, if all
5631	 * other fbs are quiescent too
5632	 */
5633	if (!dev_priv->drrs.busy_frontbuffer_bits)
5634		schedule_delayed_work(&dev_priv->drrs.work,
5635				msecs_to_jiffies(1000));
5636	mutex_unlock(&dev_priv->drrs.mutex);
5637}
5638
5639/**
5640 * DOC: Display Refresh Rate Switching (DRRS)
5641 *
5642 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5644 * dynamically, based on the usage scenario. This feature is applicable
5645 * for internal panels.
5646 *
5647 * Indication that the panel supports DRRS is given by the panel EDID, which
5648 * would list multiple refresh rates for one resolution.
5649 *
5650 * DRRS is of 2 types - static and seamless.
5651 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5652 * (may appear as a blink on screen) and is used in dock-undock scenario.
5653 * Seamless DRRS involves changing RR without any visual effect to the user
5654 * and can be used during normal system usage. This is done by programming
5655 * certain registers.
5656 *
5657 * Support for static/seamless DRRS may be indicated in the VBT based on
5658 * inputs from the panel spec.
5659 *
5660 * DRRS saves power by switching to low RR based on usage scenarios.
5661 *
5662 * eDP DRRS:-
5663 *        The implementation is based on frontbuffer tracking implementation.
5664 * When there is a disturbance on the screen triggered by user activity or a
5665 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5666 * When there is no movement on screen, after a timeout of 1 second, a switch
5667 * to low RR is made.
5668 *        For integration with frontbuffer tracking code,
5669 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5670 *
5671 * DRRS can be further extended to support other internal panels and also
5672 * the scenario of video playback wherein RR is set based on the rate
5673 * requested by userspace.
5674 */
5675
5676/**
5677 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5678 * @intel_connector: eDP connector
5679 * @fixed_mode: preferred mode of panel
5680 *
5681 * This function is  called only once at driver load to initialize basic
5682 * DRRS stuff.
5683 *
5684 * Returns:
5685 * Downclock mode if panel supports it, else return NULL.
5686 * DRRS support is determined by the presence of downclock mode (apart
5687 * from VBT setting).
5688 */
5689static struct drm_display_mode *
5690intel_dp_drrs_init(struct intel_connector *intel_connector,
5691		struct drm_display_mode *fixed_mode)
5692{
5693	struct drm_connector *connector = &intel_connector->base;
5694	struct drm_device *dev = connector->dev;
5695	struct drm_i915_private *dev_priv = dev->dev_private;
5696	struct drm_display_mode *downclock_mode = NULL;
5697
5698	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5699	mutex_init(&dev_priv->drrs.mutex);
5700
5701	if (INTEL_INFO(dev)->gen <= 6) {
5702		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5703		return NULL;
5704	}
5705
5706	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5707		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5708		return NULL;
5709	}
5710
5711	downclock_mode = intel_find_panel_downclock
5712					(dev, fixed_mode, connector);
5713
5714	if (!downclock_mode) {
5715		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5716		return NULL;
5717	}
5718
5719	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5720
5721	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5722	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5723	return downclock_mode;
5724}
5725
/*
 * eDP-specific connector setup: sanitize BIOS VDD state, cache DPCD and
 * EDID, program the power sequencer registers, pick the fixed panel
 * mode (EDID preferred, VBT fallback), and set up DRRS and backlight.
 * Returns false if the panel looks like a ghost (no DPCD), true
 * otherwise (including the non-eDP no-op case).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cache the EDID (or an ERR_PTR encoding the failure). */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5832
/*
 * Initialize the DP/eDP connector for @intel_dig_port and register it with
 * DRM.  Sets up AUX, MST (where supported), panel power sequencing for eDP,
 * and the hotplug pin.  Returns true on success; on failure the partially
 * initialized state is torn down again and false is returned.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	/* No power sequencer assigned yet; vlv/chv pick one lazily. */
	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: pick the AUX clock divider per platform. */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-stepping wiring quirk: port B uses port A's pin. */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	/* eDP needs the panel power sequencer before AUX can be used. */
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled do to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}
5996
/*
 * Allocate and wire up the encoder + connector pair for a DP (or eDP)
 * output on @port, whose control register is @output_reg.  On any failure
 * everything allocated here is freed again and the output is simply
 * skipped (no error is propagated to the caller).
 */
void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, NULL))
		goto err_encoder_init;

	/* Common encoder hooks; enable/disable paths differ per platform. */
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D can only drive pipe C; ports B/C drive pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	/* Route long/short HPD pulses for this port to the DP handler. */
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
6077
6078void intel_dp_mst_suspend(struct drm_device *dev)
6079{
6080	struct drm_i915_private *dev_priv = dev->dev_private;
6081	int i;
6082
6083	/* disable MST */
6084	for (i = 0; i < I915_MAX_PORTS; i++) {
6085		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6086		if (!intel_dig_port)
 
6087			continue;
6088
6089		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6090			if (!intel_dig_port->dp.can_mst)
6091				continue;
6092			if (intel_dig_port->dp.is_mst)
6093				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6094		}
6095	}
6096}
6097
6098void intel_dp_mst_resume(struct drm_device *dev)
6099{
6100	struct drm_i915_private *dev_priv = dev->dev_private;
6101	int i;
6102
6103	for (i = 0; i < I915_MAX_PORTS; i++) {
6104		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6105		if (!intel_dig_port)
6106			continue;
6107		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6108			int ret;
6109
6110			if (!intel_dig_port->dp.can_mst)
6111				continue;
6112
6113			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6114			if (ret != 0) {
6115				intel_dp_check_mst_status(&intel_dig_port->dp);
6116			}
6117		}
6118	}
6119}
v4.10.11
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/i2c.h>
  29#include <linux/slab.h>
  30#include <linux/export.h>
  31#include <linux/notifier.h>
  32#include <linux/reboot.h>
  33#include <drm/drmP.h>
  34#include <drm/drm_atomic_helper.h>
  35#include <drm/drm_crtc.h>
  36#include <drm/drm_crtc_helper.h>
  37#include <drm/drm_edid.h>
  38#include "intel_drv.h"
  39#include <drm/i915_drm.h>
  40#include "i915_drv.h"
  41
  42#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
  43
  44/* Compliance test status bits  */
  45#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
  46#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  47#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  48#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  49
/* Pairing of a DP link clock (in kHz) with the DPLL divider values
 * needed to generate it. */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};
  54
/* DPLL dividers for each supported DP link rate (kHz) on gen4. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL dividers for PCH-attached DP ports (ILK and later PCH split). */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL dividers for VLV DP ports. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates in kHz, ascending (HBR2 last where present). */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* Fallback table for platforms without a platform-specific rate list. */
static const int default_rates[] = { 162000, 270000, 540000 };
  99
 100/**
 101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 102 * @intel_dp: DP struct
 103 *
 104 * If a CPU or PCH DP output is attached to an eDP panel, this function
 105 * will return true, and false otherwise.
 106 */
 107static bool is_edp(struct intel_dp *intel_dp)
 108{
 109	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 110
 111	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 112}
 113
 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 115{
 116	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 117
 118	return intel_dig_port->base.base.dev;
 119}
 120
 121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 122{
 123	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 124}
 125
 126static void intel_dp_link_down(struct intel_dp *intel_dp);
 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
 130static void vlv_steal_power_sequencer(struct drm_device *dev,
 131				      enum pipe pipe);
 132static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 
 
 
 
 133
/*
 * Read the sink's maximum link bandwidth code from the cached DPCD and
 * sanitize it: anything other than the three defined values falls back
 * to 1.62 Gbps (with a one-time warning).
 */
static int
intel_dp_max_link_bw(struct intel_dp  *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
 152
/*
 * Usable lane count is limited by both ends of the link: what the source
 * port has wired up and what the sink reports in its DPCD.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
 163
 164/*
 165 * The units on the numbers in the next two are... bizarre.  Examples will
 166 * make it clearer; this one parallels an example in the eDP spec.
 167 *
 168 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 169 *
 170 *     270000 * 1 * 8 / 10 == 216000
 171 *
 172 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 173 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 174 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 175 * 119000.  At 18bpp that's 2142000 kilobits per second.
 176 *
 177 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 178 * get the result in decakilobits instead of kilobits.
 179 */
 180
 181static int
 182intel_dp_link_required(int pixel_clock, int bpp)
 183{
 184	return (pixel_clock * bpp + 9) / 10;
 185}
 186
 187static int
 188intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 189{
 190	return (max_link_clock * max_lanes * 8) / 10;
 191}
 192
/*
 * Maximum dotclock the sink side can handle.  Starts from the source's
 * max and, for a DP-to-VGA downstream port (dongle), further clamps to
 * the dongle's reported pixel clock limit.
 */
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	/* Only VGA converters advertise a usable clock limit here. */
	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	/* 0 means "no limit reported". */
	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
 215
/*
 * Return the sink's supported link rates in *@sink_rates and the number
 * of entries.  eDP 1.4 sinks provide an explicit rate table; otherwise
 * the count is derived from the max link bandwidth code over the default
 * table (BW code 0x06/0x0a/0x14 maps to 1/2/3 entries).
 */
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
 228
/*
 * Return the source-supported link rates in *@source_rates and the number
 * of entries, trimming the final (HBR2) entry when the source can't do it.
 */
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int size;

	if (IS_BROXTON(dev_priv)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}
 253
 254static int intersect_rates(const int *source_rates, int source_len,
 255			   const int *sink_rates, int sink_len,
 256			   int *common_rates)
 257{
 258	int i = 0, j = 0, k = 0;
 259
 260	while (i < source_len && j < sink_len) {
 261		if (source_rates[i] == sink_rates[j]) {
 262			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
 263				return k;
 264			common_rates[k] = source_rates[i];
 265			++k;
 266			++i;
 267			++j;
 268		} else if (source_rates[i] < sink_rates[j]) {
 269			++i;
 270		} else {
 271			++j;
 272		}
 273	}
 274	return k;
 275}
 276
/*
 * Fill @common_rates with the link rates supported by both source and
 * sink; returns the number of entries written.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
 290
/*
 * Connector ->mode_valid hook: reject modes that exceed the link's data
 * capacity or the sink's dotclock limit, don't fit the fixed eDP panel,
 * or that the hardware can't drive (too slow, double-clocked).
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP panels only ever run their fixed native mode (or smaller). */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Validate against 18bpp (6bpc), the minimum we can fall back to. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	/* Hardware minimum pixel clock is 10 MHz. */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
 331
 332uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
 333{
 334	int	i;
 335	uint32_t v = 0;
 336
 337	if (src_bytes > 4)
 338		src_bytes = 4;
 339	for (i = 0; i < src_bytes; i++)
 340		v |= ((uint32_t) src[i]) << ((3-i) * 8);
 341	return v;
 342}
 343
 344static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
 345{
 346	int i;
 347	if (dst_bytes > 4)
 348		dst_bytes = 4;
 349	for (i = 0; i < dst_bytes; i++)
 350		dst[i] = src >> ((3-i) * 8);
 351}
 352
 353static void
 354intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 355				    struct intel_dp *intel_dp);
 356static void
 357intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 358					      struct intel_dp *intel_dp,
 359					      bool force_disable_vdd);
 360static void
 361intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
 362
/*
 * Acquire the panel power sequencer mutex.  Must be paired with
 * pps_unlock().  The AUX power domain reference is taken *before* the
 * mutex: grabbing a power domain while holding pps_mutex would deadlock
 * against the power_domain mutex (see intel_power_sequencer_reset()).
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
 380
/*
 * Release the PPS mutex and drop the AUX power domain reference taken in
 * pps_lock(), in the reverse order (mutex first, then power domain).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
 394
 395static void
 396vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 397{
 398	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 399	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 
 400	enum pipe pipe = intel_dp->pps_pipe;
 401	bool pll_enabled, release_cl_override = false;
 402	enum dpio_phy phy = DPIO_PHY(pipe);
 403	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
 404	uint32_t DP;
 405
 406	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
 407		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
 408		 pipe_name(pipe), port_name(intel_dig_port->port)))
 409		return;
 410
 411	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
 412		      pipe_name(pipe), port_name(intel_dig_port->port));
 413
 414	/* Preserve the BIOS-computed detected bit. This is
 415	 * supposed to be read-only.
 416	 */
 417	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
 418	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 419	DP |= DP_PORT_WIDTH(1);
 420	DP |= DP_LINK_TRAIN_PAT_1;
 421
 422	if (IS_CHERRYVIEW(dev_priv))
 423		DP |= DP_PIPE_SELECT_CHV(pipe);
 424	else if (pipe == PIPE_B)
 425		DP |= DP_PIPEB_SELECT;
 426
 427	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
 428
 429	/*
 430	 * The DPLL for the pipe must be enabled for this to work.
 431	 * So enable temporarily it if it's not already enabled.
 432	 */
 433	if (!pll_enabled) {
 434		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
 435			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
 436
 437		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
 438				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
 439			DRM_ERROR("Failed to force on pll for pipe %c!\n",
 440				  pipe_name(pipe));
 441			return;
 442		}
 443	}
 444
 445	/*
 446	 * Similar magic as in intel_dp_enable_port().
 447	 * We _must_ do this port enable + disable trick
 448	 * to make this power seqeuencer lock onto the port.
 449	 * Otherwise even VDD force bit won't work.
 450	 */
 451	I915_WRITE(intel_dp->output_reg, DP);
 452	POSTING_READ(intel_dp->output_reg);
 453
 454	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
 455	POSTING_READ(intel_dp->output_reg);
 456
 457	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
 458	POSTING_READ(intel_dp->output_reg);
 459
 460	if (!pll_enabled) {
 461		vlv_force_pll_off(dev_priv, pipe);
 462
 463		if (release_cl_override)
 464			chv_phy_powergate_ch(dev_priv, phy, ch, false);
 465	}
 466}
 467
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * and initializing a free one (stealing it from another port if needed)
 * when none is assigned yet.  Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	/* Only pipes A and B have power sequencers on vlv/chv. */
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
 530
/*
 * Return the PPS instance index for this eDP port on BXT, reprogramming
 * the PPS registers when a reset has been flagged (e.g. after a power
 * well toggle).  Caller must hold pps_mutex.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/*
	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
	 * mapping needs to be retrieved from VBT, for now just hard-code to
	 * use instance #0 always.
	 */
	if (!intel_dp->pps_reset)
		return 0;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);

	return 0;
}
 561
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Panel power is currently on via this pipe's power sequencer. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

/* VDD force is currently asserted via this pipe's power sequencer. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe; used as the last-resort filter. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
 582
/*
 * Scan pipes A/B for a power sequencer whose port-select field matches
 * @port and which satisfies @pipe_check.  Returns the matching pipe or
 * INVALID_PIPE if none qualifies.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
 605
/*
 * At connector init time, adopt whichever power sequencer the BIOS left
 * associated with this eDP port, preferring one that is actively powering
 * the panel, then one holding VDD, then any matching one.  If none match,
 * leave pps_pipe invalid and let vlv_power_sequencer_pipe() assign one
 * on first use.  Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}
 642
/*
 * Invalidate all cached power sequencer state after the PPS hardware may
 * have lost its configuration (e.g. a power well was turned off).  On BXT
 * this flags the registers for reprogramming; on vlv/chv it drops the
 * port-to-pipe association entirely.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_BROXTON(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		if (IS_BROXTON(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
 675
/*
 * The set of panel power sequencer registers for one PPS instance.
 * pp_div is left zero-initialized on BXT, which has no divisor register.
 */
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};
 683
/*
 * Fill @regs with the PPS register offsets for the instance assigned to
 * @intel_dp: instance index on BXT, pipe index on vlv/chv, 0 elsewhere.
 */
static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
				    struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_BROXTON(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
	/* BXT has no divisor register. */
	if (!IS_BROXTON(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}
 704
/* PP_CONTROL register for this port's PPS instance. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
				&regs);

	return regs.pp_ctrl;
}
 715
/* PP_STATUS register for this port's PPS instance. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
				&regs);

	return regs.pp_stat;
}
 726
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Only act for eDP panels, and only on a restart. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Block the reboot long enough to satisfy the T12 delay. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
 762
 763static bool edp_have_panel_power(struct intel_dp *intel_dp)
 764{
 765	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 766	struct drm_i915_private *dev_priv = to_i915(dev);
 767
 768	lockdep_assert_held(&dev_priv->pps_mutex);
 769
 770	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 771	    intel_dp->pps_pipe == INVALID_PIPE)
 772		return false;
 773
 774	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 775}
 776
 777static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 778{
 779	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 780	struct drm_i915_private *dev_priv = to_i915(dev);
 781
 782	lockdep_assert_held(&dev_priv->pps_mutex);
 783
 784	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 785	    intel_dp->pps_pipe == INVALID_PIPE)
 786		return false;
 787
 788	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 789}
 790
 791static void
 792intel_dp_check_edp(struct intel_dp *intel_dp)
 793{
 794	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 795	struct drm_i915_private *dev_priv = to_i915(dev);
 796
 797	if (!is_edp(intel_dp))
 798		return;
 799
 800	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 801		WARN(1, "eDP powered off while attempting aux channel communication.\n");
 802		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
 803			      I915_READ(_pp_stat_reg(intel_dp)),
 804			      I915_READ(_pp_ctrl_reg(intel_dp)));
 805	}
 806}
 807
/*
 * Wait for the in-flight AUX transfer on @intel_dp to finish.
 *
 * With AUX interrupts we sleep on the gmbus wait queue (woken by the AUX
 * done interrupt); otherwise we poll the control register. Both paths give
 * up after ~10ms and log an error. Returns the last AUX_CH_CTL status read,
 * which the caller inspects for done/error bits.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* "transfer complete" condition; side effect: latches status for the return */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
 831
 832static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 833{
 834	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 835	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 836
 837	if (index)
 838		return 0;
 839
 840	/*
 841	 * The clock divider is based off the hrawclk, and would like to run at
 842	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
 843	 */
 844	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 845}
 846
 847static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 848{
 849	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 850	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 
 851
 852	if (index)
 853		return 0;
 854
 855	/*
 856	 * The clock divider is based off the cdclk or PCH rawclk, and would
 857	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
 858	 * divide by 2000 and use that
 859	 */
 860	if (intel_dig_port->port == PORT_A)
 861		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
 862	else
 863		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 
 
 864}
 865
 866static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 867{
 868	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 869	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 
 870
 871	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
 
 
 
 
 872		/* Workaround for non-ULT HSW */
 873		switch (index) {
 874		case 0: return 63;
 875		case 1: return 72;
 876		default: return 0;
 877		}
 
 
 878	}
 
 879
 880	return ilk_get_aux_clock_divider(intel_dp, index);
 
 
 881}
 882
 883static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 884{
 885	/*
 886	 * SKL doesn't need us to program the AUX clock divider (Hardware will
 887	 * derive the clock from CDCLK automatically). We still implement the
 888	 * get_aux_clock_divider vfunc to plug-in into the existing code.
 889	 */
 890	return index ? 0 : 1;
 891}
 892
 893static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 894				     bool has_aux_irq,
 895				     int send_bytes,
 896				     uint32_t aux_clock_divider)
 897{
 898	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 899	struct drm_i915_private *dev_priv =
 900			to_i915(intel_dig_port->base.base.dev);
 901	uint32_t precharge, timeout;
 902
 903	if (IS_GEN6(dev_priv))
 904		precharge = 3;
 905	else
 906		precharge = 5;
 907
 908	if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
 909		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
 910	else
 911		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
 912
 913	return DP_AUX_CH_CTL_SEND_BUSY |
 914	       DP_AUX_CH_CTL_DONE |
 915	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 916	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
 917	       timeout |
 918	       DP_AUX_CH_CTL_RECEIVE_ERROR |
 919	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 920	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
 921	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
 922}
 923
 924static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 925				      bool has_aux_irq,
 926				      int send_bytes,
 927				      uint32_t unused)
 928{
 929	return DP_AUX_CH_CTL_SEND_BUSY |
 930	       DP_AUX_CH_CTL_DONE |
 931	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
 932	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
 933	       DP_AUX_CH_CTL_TIME_OUT_1600us |
 934	       DP_AUX_CH_CTL_RECEIVE_ERROR |
 935	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
 936	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
 937	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 938}
 939
/*
 * Perform one raw AUX channel transaction: write up to 20 bytes from @send,
 * read up to @recv_size bytes back into @recv.
 *
 * Returns the number of bytes received, or a negative errno:
 * -EBUSY if the channel never went idle or the reply size is forbidden,
 * -E2BIG for oversized requests, -EIO on receive error, -ETIMEDOUT when
 * the sink did not reply (normal when nothing is connected).
 *
 * Takes the PPS lock (and VDD, for eDP) around the whole transaction, and
 * raises a PM QoS request because AUX is very sensitive to irq latency.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the WARN: only fire when the stuck status changes. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the divider list; inner loop retries the transfer. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
1111
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer hook: marshal a drm_dp_aux_msg into the raw AUX
 * wire format (4-byte header: request/address/length) and hand it to
 * intel_dp_aux_ch(). Returns the payload size on success, negative errno
 * on failure; short-write byte counts are reported per the DP spec.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Header: request nibble + 20-bit address + (size - 1). */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size messages are address-only transactions. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1186
1187static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1188				enum port port)
1189{
1190	const struct ddi_vbt_port_info *info =
1191		&dev_priv->vbt.ddi_port_info[port];
1192	enum port aux_port;
1193
1194	if (!info->alternate_aux_channel) {
1195		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1196			      port_name(port), port_name(port));
1197		return port;
1198	}
1199
1200	switch (info->alternate_aux_channel) {
1201	case DP_AUX_A:
1202		aux_port = PORT_A;
1203		break;
1204	case DP_AUX_B:
1205		aux_port = PORT_B;
1206		break;
1207	case DP_AUX_C:
1208		aux_port = PORT_C;
1209		break;
1210	case DP_AUX_D:
1211		aux_port = PORT_D;
1212		break;
1213	default:
1214		MISSING_CASE(info->alternate_aux_channel);
1215		aux_port = PORT_A;
1216		break;
1217	}
1218
1219	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1220		      port_name(aux_port), port_name(port));
1221
1222	return aux_port;
1223}
1224
1225static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1226				  enum port port)
1227{
1228	switch (port) {
1229	case PORT_B:
1230	case PORT_C:
1231	case PORT_D:
1232		return DP_AUX_CH_CTL(port);
1233	default:
1234		MISSING_CASE(port);
1235		return DP_AUX_CH_CTL(PORT_B);
1236	}
1237}
1238
1239static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1240				   enum port port, int index)
1241{
1242	switch (port) {
1243	case PORT_B:
1244	case PORT_C:
1245	case PORT_D:
1246		return DP_AUX_CH_DATA(port, index);
1247	default:
1248		MISSING_CASE(port);
1249		return DP_AUX_CH_DATA(PORT_B, index);
1250	}
1251}
1252
1253static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1254				  enum port port)
1255{
1256	switch (port) {
1257	case PORT_A:
1258		return DP_AUX_CH_CTL(port);
1259	case PORT_B:
1260	case PORT_C:
1261	case PORT_D:
1262		return PCH_DP_AUX_CH_CTL(port);
1263	default:
1264		MISSING_CASE(port);
1265		return DP_AUX_CH_CTL(PORT_A);
1266	}
1267}
1268
1269static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1270				   enum port port, int index)
1271{
1272	switch (port) {
1273	case PORT_A:
1274		return DP_AUX_CH_DATA(port, index);
1275	case PORT_B:
1276	case PORT_C:
1277	case PORT_D:
1278		return PCH_DP_AUX_CH_DATA(port, index);
1279	default:
1280		MISSING_CASE(port);
1281		return DP_AUX_CH_DATA(PORT_A, index);
1282	}
1283}
1284
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1285static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1286				  enum port port)
1287{
 
 
 
1288	switch (port) {
1289	case PORT_A:
1290	case PORT_B:
1291	case PORT_C:
1292	case PORT_D:
1293		return DP_AUX_CH_CTL(port);
1294	default:
1295		MISSING_CASE(port);
1296		return DP_AUX_CH_CTL(PORT_A);
1297	}
1298}
1299
1300static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1301				   enum port port, int index)
1302{
 
 
 
1303	switch (port) {
1304	case PORT_A:
1305	case PORT_B:
1306	case PORT_C:
1307	case PORT_D:
1308		return DP_AUX_CH_DATA(port, index);
1309	default:
1310		MISSING_CASE(port);
1311		return DP_AUX_CH_DATA(PORT_A, index);
1312	}
1313}
1314
1315static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1316				    enum port port)
1317{
1318	if (INTEL_INFO(dev_priv)->gen >= 9)
1319		return skl_aux_ctl_reg(dev_priv, port);
1320	else if (HAS_PCH_SPLIT(dev_priv))
1321		return ilk_aux_ctl_reg(dev_priv, port);
1322	else
1323		return g4x_aux_ctl_reg(dev_priv, port);
1324}
1325
1326static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1327				     enum port port, int index)
1328{
1329	if (INTEL_INFO(dev_priv)->gen >= 9)
1330		return skl_aux_data_reg(dev_priv, port, index);
1331	else if (HAS_PCH_SPLIT(dev_priv))
1332		return ilk_aux_data_reg(dev_priv, port, index);
1333	else
1334		return g4x_aux_data_reg(dev_priv, port, index);
1335}
1336
1337static void intel_aux_reg_init(struct intel_dp *intel_dp)
1338{
1339	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1340	enum port port = intel_aux_port(dev_priv,
1341					dp_to_dig_port(intel_dp)->port);
1342	int i;
1343
1344	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1345	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1346		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1347}
1348
/* Free what intel_dp_aux_init() allocated (currently just the AUX name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1354
1355static void
1356intel_dp_aux_init(struct intel_dp *intel_dp)
1357{
1358	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1359	enum port port = intel_dig_port->port;
 
1360
1361	intel_aux_reg_init(intel_dp);
1362	drm_dp_aux_init(&intel_dp->aux);
1363
1364	/* Failure to allocate our preferred name is not critical */
1365	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
 
 
 
 
1366	intel_dp->aux.transfer = intel_dp_aux_transfer;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1367}
1368
1369bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1370{
1371	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1372	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 
 
 
 
1373
1374	if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
1375	    IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
1376		return true;
1377	else
1378		return false;
1379}
1380
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1381static void
1382intel_dp_set_clock(struct intel_encoder *encoder,
1383		   struct intel_crtc_state *pipe_config)
1384{
1385	struct drm_device *dev = encoder->base.dev;
1386	struct drm_i915_private *dev_priv = to_i915(dev);
1387	const struct dp_link_dpll *divisor = NULL;
1388	int i, count = 0;
1389
1390	if (IS_G4X(dev_priv)) {
1391		divisor = gen4_dpll;
1392		count = ARRAY_SIZE(gen4_dpll);
1393	} else if (HAS_PCH_SPLIT(dev_priv)) {
1394		divisor = pch_dpll;
1395		count = ARRAY_SIZE(pch_dpll);
1396	} else if (IS_CHERRYVIEW(dev_priv)) {
1397		divisor = chv_dpll;
1398		count = ARRAY_SIZE(chv_dpll);
1399	} else if (IS_VALLEYVIEW(dev_priv)) {
1400		divisor = vlv_dpll;
1401		count = ARRAY_SIZE(vlv_dpll);
1402	}
1403
1404	if (divisor && count) {
1405		for (i = 0; i < count; i++) {
1406			if (pipe_config->port_clock == divisor[i].clock) {
1407				pipe_config->dpll = divisor[i].dpll;
1408				pipe_config->clock_set = true;
1409				break;
1410			}
1411		}
1412	}
1413}
1414
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Format @nelem ints from @array into @str as "a, b, c", truncating
 * silently when @len is exhausted. @str is always NUL-terminated
 * (for len > 0).
 *
 * Fixes: the old "r >= len" check only caught a negative snprintf()
 * return by accident of signed-to-unsigned conversion; test for the
 * error case explicitly, and don't write the initial NUL when len == 0.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/* Stop on encoding error or once the output is truncated. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1430
1431static void intel_dp_print_rates(struct intel_dp *intel_dp)
1432{
1433	const int *source_rates, *sink_rates;
1434	int source_len, sink_len, common_len;
1435	int common_rates[DP_MAX_SUPPORTED_RATES];
1436	char str[128]; /* FIXME: too big for stack? */
1437
1438	if ((drm_debug & DRM_UT_KMS) == 0)
1439		return;
1440
1441	source_len = intel_dp_source_rates(intel_dp, &source_rates);
1442	snprintf_int_array(str, sizeof(str), source_rates, source_len);
1443	DRM_DEBUG_KMS("source rates: %s\n", str);
1444
1445	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1446	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1447	DRM_DEBUG_KMS("sink rates: %s\n", str);
1448
1449	common_len = intel_dp_common_rates(intel_dp, common_rates);
1450	snprintf_int_array(str, sizeof(str), common_rates, common_len);
1451	DRM_DEBUG_KMS("common rates: %s\n", str);
1452}
1453
1454bool
1455__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
1456{
1457	u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
1458						      DP_SINK_OUI;
1459
1460	return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
1461	       sizeof(*desc);
1462}
1463
1464bool intel_dp_read_desc(struct intel_dp *intel_dp)
1465{
1466	struct intel_dp_desc *desc = &intel_dp->desc;
1467	bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
1468		       DP_OUI_SUPPORT;
1469	int dev_id_len;
1470
1471	if (!__intel_dp_read_desc(intel_dp, desc))
1472		return false;
1473
1474	dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
1475	DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
1476		      drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
1477		      (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
1478		      dev_id_len, desc->device_id,
1479		      desc->hw_rev >> 4, desc->hw_rev & 0xf,
1480		      desc->sw_major_rev, desc->sw_minor_rev);
1481
1482	return true;
1483}
1484
1485static int rate_to_index(int find, const int *rates)
1486{
1487	int i = 0;
1488
1489	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1490		if (find == rates[i])
1491			break;
1492
1493	return i;
1494}
1495
1496int
1497intel_dp_max_link_rate(struct intel_dp *intel_dp)
1498{
1499	int rates[DP_MAX_SUPPORTED_RATES] = {};
1500	int len;
1501
1502	len = intel_dp_common_rates(intel_dp, rates);
1503	if (WARN_ON(len <= 0))
1504		return 162000;
1505
1506	return rates[len - 1];
1507}
1508
/*
 * Map a link rate (kHz) to its position in the sink's advertised rate
 * table, i.e. the value to program into DP_LINK_RATE_SET.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1513
1514void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1515			   uint8_t *link_bw, uint8_t *rate_select)
1516{
1517	if (intel_dp->num_sink_rates) {
1518		*link_bw = 0;
1519		*rate_select =
1520			intel_dp_rate_select(intel_dp, port_clock);
1521	} else {
1522		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1523		*rate_select = 0;
1524	}
1525}
1526
1527static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1528				struct intel_crtc_state *pipe_config)
1529{
1530	int bpp, bpc;
1531
1532	bpp = pipe_config->pipe_bpp;
1533	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1534
1535	if (bpc > 0)
1536		bpp = min(bpp, 3*bpc);
1537
1538	return bpp;
1539}
1540
1541bool
1542intel_dp_compute_config(struct intel_encoder *encoder,
1543			struct intel_crtc_state *pipe_config,
1544			struct drm_connector_state *conn_state)
1545{
1546	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
1547	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1548	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1549	enum port port = dp_to_dig_port(intel_dp)->port;
1550	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1551	struct intel_connector *intel_connector = intel_dp->attached_connector;
1552	int lane_count, clock;
1553	int min_lane_count = 1;
1554	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1555	/* Conveniently, the link BW constants become indices with a shift...*/
1556	int min_clock = 0;
1557	int max_clock;
1558	int bpp, mode_rate;
1559	int link_avail, link_clock;
1560	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1561	int common_len;
1562	uint8_t link_bw, rate_select;
1563
1564	common_len = intel_dp_common_rates(intel_dp, common_rates);
1565
1566	/* No common link rates between source and sink */
1567	WARN_ON(common_len <= 0);
1568
1569	max_clock = common_len - 1;
1570
1571	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1572		pipe_config->has_pch_encoder = true;
1573
 
1574	pipe_config->has_drrs = false;
1575	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1576
1577	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1578		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1579				       adjusted_mode);
1580
1581		if (INTEL_GEN(dev_priv) >= 9) {
1582			int ret;
1583			ret = skl_update_scaler_crtc(pipe_config);
1584			if (ret)
1585				return ret;
1586		}
1587
1588		if (HAS_GMCH_DISPLAY(dev_priv))
1589			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1590						 intel_connector->panel.fitting_mode);
1591		else
1592			intel_pch_panel_fitting(intel_crtc, pipe_config,
1593						intel_connector->panel.fitting_mode);
1594	}
1595
1596	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1597		return false;
1598
1599	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1600		      "max bw %d pixel clock %iKHz\n",
1601		      max_lane_count, common_rates[max_clock],
1602		      adjusted_mode->crtc_clock);
1603
1604	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1605	 * bpc in between. */
1606	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1607	if (is_edp(intel_dp)) {
1608
1609		/* Get bpp from vbt only for panels that dont have bpp in edid */
1610		if (intel_connector->base.display_info.bpc == 0 &&
1611			(dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1612			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1613				      dev_priv->vbt.edp.bpp);
1614			bpp = dev_priv->vbt.edp.bpp;
1615		}
1616
1617		/*
1618		 * Use the maximum clock and number of lanes the eDP panel
1619		 * advertizes being capable of. The panels are generally
1620		 * designed to support only a single clock and lane
1621		 * configuration, and typically these values correspond to the
1622		 * native resolution of the panel.
1623		 */
1624		min_lane_count = max_lane_count;
1625		min_clock = max_clock;
1626	}
1627
1628	for (; bpp >= 6*3; bpp -= 2*3) {
1629		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1630						   bpp);
1631
1632		for (clock = min_clock; clock <= max_clock; clock++) {
1633			for (lane_count = min_lane_count;
1634				lane_count <= max_lane_count;
1635				lane_count <<= 1) {
1636
1637				link_clock = common_rates[clock];
1638				link_avail = intel_dp_max_data_rate(link_clock,
1639								    lane_count);
1640
1641				if (mode_rate <= link_avail) {
1642					goto found;
1643				}
1644			}
1645		}
1646	}
1647
1648	return false;
1649
1650found:
1651	if (intel_dp->color_range_auto) {
1652		/*
1653		 * See:
1654		 * CEA-861-E - 5.1 Default Encoding Parameters
1655		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1656		 */
1657		pipe_config->limited_color_range =
1658			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1659	} else {
1660		pipe_config->limited_color_range =
1661			intel_dp->limited_color_range;
1662	}
1663
1664	pipe_config->lane_count = lane_count;
1665
1666	pipe_config->pipe_bpp = bpp;
1667	pipe_config->port_clock = common_rates[clock];
1668
1669	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1670			      &link_bw, &rate_select);
1671
1672	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1673		      link_bw, rate_select, pipe_config->lane_count,
1674		      pipe_config->port_clock, bpp);
1675	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1676		      mode_rate, link_avail);
1677
1678	intel_link_compute_m_n(bpp, lane_count,
1679			       adjusted_mode->crtc_clock,
1680			       pipe_config->port_clock,
1681			       &pipe_config->dp_m_n);
1682
1683	if (intel_connector->panel.downclock_mode != NULL &&
1684		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1685			pipe_config->has_drrs = true;
1686			intel_link_compute_m_n(bpp, lane_count,
1687				intel_connector->panel.downclock_mode->clock,
1688				pipe_config->port_clock,
1689				&pipe_config->dp_m2_n2);
1690	}
1691
1692	/*
1693	 * DPLL0 VCO may need to be adjusted to get the correct
1694	 * clock for eDP. This will affect cdclk as well.
1695	 */
1696	if (is_edp(intel_dp) &&
1697	    (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
1698		int vco;
1699
1700		switch (pipe_config->port_clock / 2) {
1701		case 108000:
1702		case 216000:
1703			vco = 8640000;
1704			break;
1705		default:
1706			vco = 8100000;
1707			break;
1708		}
1709
1710		to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
1711	}
1712
1713	if (!HAS_DDI(dev_priv))
1714		intel_dp_set_clock(encoder, pipe_config);
1715
1716	return true;
1717}
1718
/*
 * Record the link parameters (rate in kHz, lane count, MST vs SST) that
 * the port will be/was trained with.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, uint8_t lane_count,
			      bool link_mst)
{
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}
1727
/*
 * Program intel_dp->DP, the value for the DP port control register, from
 * the computed crtc state. The register layout differs per platform
 * (see the comment block below); nothing is written to hardware here
 * beyond the TRANS_DP_CTL update on CPT.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select live here. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH ports: enhanced framing moved to TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / g4x / VLV / CHV layout. */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1812
/*
 * PP_STATUS mask/value pairs for wait_panel_status(): each pair describes
 * a steady state of the panel power sequencer to wait for (fully on,
 * fully off, or off and past the power cycle delay).
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
				   struct intel_dp *intel_dp);
1824
/*
 * Poll the panel power status register until (status & mask) == value,
 * with a 5 second timeout. Logs an error (but does not otherwise fail)
 * on timeout. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Sanity check the software PPS state against the hardware */
	intel_pps_verify_state(dev_priv, intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms timeout; the panel power cycle can legitimately take a while */
	if (intel_wait_for_register(dev_priv,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));


	DRM_DEBUG_KMS("Wait complete\n");
}
1854
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1860
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1866
1867static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1868{
1869	ktime_t panel_power_on_time;
1870	s64 panel_power_off_duration;
1871
1872	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1873
1874	/* take the difference of currrent time and panel power off time
1875	 * and then make panel wait for t11_t12 if needed. */
1876	panel_power_on_time = ktime_get_boottime();
1877	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1878
1879	/* When we disable the VDD override bit last we have to do the manual
1880	 * wait. */
1881	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1882		wait_remaining_ms_from_jiffies(jiffies,
1883				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1884
1885	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1886}
1887
/* Honour the panel's power-on -> backlight-on delay (t8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1893
/* Honour the backlight-off -> panel-off delay (t9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1899
1900/* Read the current pp_control value, unlocking the register if it
1901 * is locked
1902 */
1903
1904static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1905{
1906	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1907	struct drm_i915_private *dev_priv = to_i915(dev);
1908	u32 control;
1909
1910	lockdep_assert_held(&dev_priv->pps_mutex);
1911
1912	control = I915_READ(_pp_ctrl_reg(intel_dp));
1913	if (WARN_ON(!HAS_DDI(dev_priv) &&
1914		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
1915		control &= ~PANEL_UNLOCK_MASK;
1916		control |= PANEL_UNLOCK_REGS;
1917	}
1918	return control;
1919}
1920
1921/*
1922 * Must be paired with edp_panel_vdd_off().
1923 * Must hold pps_mutex around the whole on/off sequence.
1924 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1925 */
1926static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1927{
1928	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1929	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1930	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1931	struct drm_i915_private *dev_priv = to_i915(dev);
1932	enum intel_display_power_domain power_domain;
1933	u32 pp;
1934	i915_reg_t pp_stat_reg, pp_ctrl_reg;
1935	bool need_to_disable = !intel_dp->want_panel_vdd;
1936
1937	lockdep_assert_held(&dev_priv->pps_mutex);
1938
1939	if (!is_edp(intel_dp))
1940		return false;
1941
1942	cancel_delayed_work(&intel_dp->panel_vdd_work);
1943	intel_dp->want_panel_vdd = true;
1944
1945	if (edp_have_panel_vdd(intel_dp))
1946		return need_to_disable;
1947
1948	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1949	intel_display_power_get(dev_priv, power_domain);
1950
1951	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1952		      port_name(intel_dig_port->port));
1953
1954	if (!edp_have_panel_power(intel_dp))
1955		wait_panel_power_cycle(intel_dp);
1956
1957	pp = ironlake_get_pp_control(intel_dp);
1958	pp |= EDP_FORCE_VDD;
1959
1960	pp_stat_reg = _pp_stat_reg(intel_dp);
1961	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1962
1963	I915_WRITE(pp_ctrl_reg, pp);
1964	POSTING_READ(pp_ctrl_reg);
1965	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1966			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1967	/*
1968	 * If the panel wasn't on, delay before accessing aux channel
1969	 */
1970	if (!edp_have_panel_power(intel_dp)) {
1971		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1972			      port_name(intel_dig_port->port));
1973		msleep(intel_dp->panel_power_up_delay);
1974	}
1975
1976	return need_to_disable;
1977}
1978
1979/*
1980 * Must be paired with intel_edp_panel_vdd_off() or
1981 * intel_edp_panel_off().
1982 * Nested calls to these functions are not allowed since
1983 * we drop the lock. Caller must use some higher level
1984 * locking to prevent nested calls from other threads.
1985 */
1986void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1987{
1988	bool vdd;
1989
1990	if (!is_edp(intel_dp))
1991		return;
1992
1993	pps_lock(intel_dp);
1994	vdd = edp_panel_vdd_on(intel_dp);
1995	pps_unlock(intel_dp);
1996
1997	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1998	     port_name(dp_to_dig_port(intel_dp)->port));
1999}
2000
/*
 * Actually turn the panel VDD override off and drop the AUX power domain
 * reference taken in edp_panel_vdd_on(). Must only be called once
 * want_panel_vdd has been cleared; caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Record when the panel went fully off, for the power cycle delay */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2041
2042static void edp_panel_vdd_work(struct work_struct *__work)
2043{
2044	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2045						 struct intel_dp, panel_vdd_work);
2046
2047	pps_lock(intel_dp);
2048	if (!intel_dp->want_panel_vdd)
2049		edp_panel_vdd_off_sync(intel_dp);
2050	pps_unlock(intel_dp);
2051}
2052
2053static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2054{
2055	unsigned long delay;
2056
2057	/*
2058	 * Queue the timer to fire a long time from now (relative to the power
2059	 * down delay) to keep the panel power up across a sequence of
2060	 * operations.
2061	 */
2062	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2063	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2064}
2065
2066/*
2067 * Must be paired with edp_panel_vdd_on().
2068 * Must hold pps_mutex around the whole on/off sequence.
2069 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2070 */
2071static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2072{
2073	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
2074
2075	lockdep_assert_held(&dev_priv->pps_mutex);
2076
2077	if (!is_edp(intel_dp))
2078		return;
2079
2080	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2081	     port_name(dp_to_dig_port(intel_dp)->port));
2082
2083	intel_dp->want_panel_vdd = false;
2084
2085	if (sync)
2086		edp_panel_vdd_off_sync(intel_dp);
2087	else
2088		edp_panel_vdd_schedule_off(intel_dp);
2089}
2090
/*
 * Turn the eDP panel power on via the power sequencer and wait for it to
 * reach the on state. No-op (with a WARN) if the panel is already on.
 * Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the mandatory off time before powering back on */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN5(dev_priv))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Remember power-on time for the backlight-on delay (t8) */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2138
/* Public wrapper: turn the eDP panel on, taking pps_mutex internally. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2148
2149
/*
 * Turn the eDP panel power off and wait for the sequencer to finish.
 * Expects the caller to have requested VDD (see the WARN); this function
 * consumes that request and drops the corresponding AUX power reference.
 * Caller must hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record off time for the power cycle delay, then wait for off */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2191
/* Public wrapper: turn the eDP panel off, taking pps_mutex internally. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2201
/*
 * Enable backlight in the panel power control. Waits for the panel-on ->
 * backlight-on delay first so the image is stable before it becomes
 * visible. Takes pps_mutex internally.
 */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2231
/*
 * Enable backlight PWM and backlight PP control. PWM first so a valid
 * duty cycle is programmed before the panel power control unblanks it.
 */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2243
/*
 * Disable backlight in the panel power control, then honour the
 * backlight-off delay before returning. Takes pps_mutex internally.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time, then wait the backlight-off delay (t9) */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2270
/*
 * Disable backlight PP control and backlight PWM — reverse order of
 * intel_edp_backlight_on().
 */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2282
2283/*
2284 * Hook for controlling the panel power control backlight through the bl_power
2285 * sysfs attribute. Take care to handle multiple calls.
2286 */
2287static void intel_edp_backlight_power(struct intel_connector *connector,
2288				      bool enable)
2289{
2290	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2291	bool is_enabled;
2292
2293	pps_lock(intel_dp);
2294	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2295	pps_unlock(intel_dp);
2296
2297	if (is_enabled == enable)
2298		return;
2299
2300	DRM_DEBUG_KMS("panel power control backlight %s\n",
2301		      enable ? "enable" : "disable");
2302
2303	if (enable)
2304		_intel_edp_backlight_on(intel_dp);
2305	else
2306		_intel_edp_backlight_off(intel_dp);
2307}
2308
2309static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2310{
2311	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2312	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2313	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2314
2315	I915_STATE_WARN(cur_state != state,
2316			"DP port %c state assertion failure (expected %s, current %s)\n",
2317			port_name(dig_port->port),
2318			onoff(state), onoff(cur_state));
2319}
2320#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2321
2322static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2323{
2324	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2325
2326	I915_STATE_WARN(cur_state != state,
2327			"eDP PLL state assertion failure (expected %s, current %s)\n",
2328			onoff(state), onoff(cur_state));
2329}
2330#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2331#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2332
/*
 * Enable the CPU eDP PLL (port A on ILK/SNB/IVB): program the frequency
 * first, then set the enable bit in a second write. Must be called with
 * the pipe, port and PLL all disabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2372
/*
 * Disable the CPU eDP PLL. Must be called with the pipe and port already
 * disabled but the PLL still enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2391
/*
 * If the sink supports it (DPCD rev >= 1.1), set the sink's power state
 * via the DP_SET_POWER DPCD register. Failures are logged but not
 * propagated.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		/*
		 * When turning on, retry the write (up to 3 attempts with
		 * 1 ms sleeps in between) to give the sink time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1) on success */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2422
/*
 * Read back whether the DP port is enabled and, if so, which pipe it is
 * driving (stored in *pipe). Returns false if the port's power domain is
 * off or the port is disabled.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	/* Bail (without touching registers) if the power domain is off */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		enum pipe p;

		/* CPT: the pipe<->port mapping lives in TRANS_DP_CTL */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev_priv)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	/* Balance the power reference taken above */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2475
/*
 * Read the current hardware state of the DP port back into
 * @pipe_config: sync polarity flags, audio, color range, lane count,
 * link M/N and port clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);


	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT keeps the sync polarity bits in TRANS_DP_CTL */
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)

		pipe_config->limited_color_range = true;



	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* CPU eDP: port clock comes from the DP_A PLL frequency bits */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);




	if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2555
/*
 * Common DP disable path: audio off, PSR off (non-DDI), panel powered
 * down under VDD, then (on g4x) the port itself — later platforms take
 * the port down in post_disable instead.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     struct intel_crtc_state *old_crtc_state,
			     struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);


	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_GEN(dev_priv) < 5)
		intel_dp_link_down(intel_dp);
}
2580
2581static void ilk_post_disable_dp(struct intel_encoder *encoder,
2582				struct intel_crtc_state *old_crtc_state,
2583				struct drm_connector_state *old_conn_state)
2584{
2585	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2586	enum port port = dp_to_dig_port(intel_dp)->port;
2587
2588	intel_dp_link_down(intel_dp);
2589
2590	/* Only ilk+ has port A */
2591	if (port == PORT_A)
2592		ironlake_edp_pll_off(intel_dp);
2593}
2594
/* VLV post-disable: just take the link down after the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				struct intel_crtc_state *old_crtc_state,
				struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2603
2604static void chv_post_disable_dp(struct intel_encoder *encoder,
2605				struct intel_crtc_state *old_crtc_state,
2606				struct drm_connector_state *old_conn_state)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2607{
2608	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2609	struct drm_device *dev = encoder->base.dev;
2610	struct drm_i915_private *dev_priv = to_i915(dev);
2611
2612	intel_dp_link_down(intel_dp);
2613
2614	mutex_lock(&dev_priv->sb_lock);
2615
2616	/* Assert data lane reset */
2617	chv_data_lane_soft_reset(encoder, true);
2618
2619	mutex_unlock(&dev_priv->sb_lock);
2620}
2621
/*
 * Translate a DPCD-style training pattern (dp_train_pat) into the
 * platform's link training register bits: DP_TP_CTL on DDI platforms,
 * the CPT variant of the DP register on IVB port A / CPT PCH ports, and
 * the classic DP register bits everywhere else. *DP is updated in place
 * (and written by the caller) except on DDI, which writes DP_TP_CTL
 * directly here.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & DP_TRAINING_PATTERN_MASK);

	if (HAS_DDI(dev_priv)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* CPT-style registers have no TPS3 encoding */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev_priv))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev_priv)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2709
2710static void intel_dp_enable_port(struct intel_dp *intel_dp,
2711				 struct intel_crtc_state *old_crtc_state)
2712{
2713	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2714	struct drm_i915_private *dev_priv = to_i915(dev);
 
 
2715
2716	/* enable with pattern 1 (as per spec) */
 
 
2717
2718	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
 
2719
2720	/*
2721	 * Magic for VLV/CHV. We _must_ first set up the register
2722	 * without actually enabling the port, and then do another
2723	 * write to enable the port. Otherwise link training will
2724	 * fail when the power sequencer is freshly used for this port.
2725	 */
2726	intel_dp->DP |= DP_PORT_EN;
2727	if (old_crtc_state->has_audio)
2728		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2729
2730	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2731	POSTING_READ(intel_dp->output_reg);
2732}
2733
/*
 * Common DP encoder enable path: bring up the port, run the eDP panel
 * power sequence, wait for VLV/CHV PHY lanes, link train the sink, and
 * finally enable audio if the new crtc state wants it.
 *
 * The whole panel power / port enable portion runs under pps_lock() to
 * serialize against other users of the panel power sequencer.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    struct intel_crtc_state *pipe_config,
			    struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;

	/* The port must not already be enabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV/CHV need a power sequencer assigned to this pipe first. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp, pipe_config);

	/* eDP panel power on sequence (no-ops for external DP). */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		/* CHV only waits for the lanes actually in use. */
		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	/* Wake the sink and train the link before enabling audio. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
2781
2782static void g4x_enable_dp(struct intel_encoder *encoder,
2783			  struct intel_crtc_state *pipe_config,
2784			  struct drm_connector_state *conn_state)
2785{
2786	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2787
2788	intel_enable_dp(encoder, pipe_config, conn_state);
2789	intel_edp_backlight_on(intel_dp);
2790}
2791
2792static void vlv_enable_dp(struct intel_encoder *encoder,
2793			  struct intel_crtc_state *pipe_config,
2794			  struct drm_connector_state *conn_state)
2795{
2796	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2797
2798	intel_edp_backlight_on(intel_dp);
2799	intel_psr_enable(intel_dp);
2800}
2801
2802static void g4x_pre_enable_dp(struct intel_encoder *encoder,
2803			      struct intel_crtc_state *pipe_config,
2804			      struct drm_connector_state *conn_state)
2805{
 
2806	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2807	enum port port = dp_to_dig_port(intel_dp)->port;
 
2808
2809	intel_dp_prepare(encoder, pipe_config);
 
 
 
 
 
 
 
 
 
 
 
 
 
2810
2811	/* Only ilk+ has port A */
2812	if (port == PORT_A)
2813		ironlake_edp_pll_on(intel_dp, pipe_config);
2814}
2815
/*
 * Logically disconnect the power sequencer currently assigned to this
 * eDP port: sync off any pending vdd, clear the port select in the
 * sequencer's PP_ON_DELAYS register, and mark the port as having no
 * power sequencer. Caller must hold pps_mutex (the helpers called here
 * assert it).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	/* VLV/CHV only have power sequencers on pipes A and B. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2844
2845static void vlv_steal_power_sequencer(struct drm_device *dev,
2846				      enum pipe pipe)
2847{
2848	struct drm_i915_private *dev_priv = to_i915(dev);
2849	struct intel_encoder *encoder;
2850
2851	lockdep_assert_held(&dev_priv->pps_mutex);
2852
 
 
 
2853	for_each_intel_encoder(dev, encoder) {
2854		struct intel_dp *intel_dp;
2855		enum port port;
2856
2857		if (encoder->type != INTEL_OUTPUT_EDP)
2858			continue;
2859
2860		intel_dp = enc_to_intel_dp(&encoder->base);
2861		port = dp_to_dig_port(intel_dp)->port;
2862
2863		if (intel_dp->pps_pipe != pipe)
2864			continue;
2865
2866		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2867			      pipe_name(pipe), port_name(port));
2868
2869		WARN(encoder->base.crtc,
2870		     "stealing pipe %c power sequencer from active eDP port %c\n",
2871		     pipe_name(pipe), port_name(port));
2872
2873		/* make sure vdd is off before we steal it */
2874		vlv_detach_power_sequencer(intel_dp);
2875	}
2876}
2877
/*
 * Assign the crtc's pipe power sequencer to this eDP port and program
 * it. Ordering matters: first release any sequencer this port was
 * using, then steal the target sequencer from any other port, and only
 * then claim and initialize it. Must be called with pps_mutex held.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only eDP ports use a panel power sequencer. */
	if (!is_edp(intel_dp))
		return;

	/* Nothing to do if we already own the right sequencer. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
}
2918
/*
 * VLV encoder ->pre_enable hook: program the DPIO PHY for this encoder,
 * then run the common DP enable sequence. The PHY setup must precede
 * the port/link bring-up.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder);

	intel_enable_dp(encoder, pipe_config, conn_state);
}
2927
/*
 * VLV encoder ->pre_pll_enable hook: program the port register, then do
 * the PHY setup that must happen before the PLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder);
}
2936
/*
 * CHV encoder ->pre_enable hook: program the DPIO PHY, run the common
 * DP enable sequence, then drop the second common lane override now
 * that the PHY is fully up.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      struct intel_crtc_state *pipe_config,
			      struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
2948
/*
 * CHV encoder ->pre_pll_enable hook: program the port register, then do
 * the PHY setup that must happen before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  struct intel_crtc_state *pipe_config,
				  struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder);
}
2957
/*
 * CHV encoder ->post_pll_disable hook: tear down the DPIO PHY state
 * after the PLL has been disabled.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    struct intel_crtc_state *pipe_config,
				    struct drm_connector_state *conn_state)
{
	chv_phy_post_pll_disable(encoder);
}
2964
2965/*
2966 * Fetch AUX CH registers 0x202 - 0x207 which contain
2967 * link status information
2968 */
2969bool
2970intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2971{
2972	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
2973				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 
 
2974}
2975
2976/* These are source-specific values. */
2977uint8_t
2978intel_dp_voltage_max(struct intel_dp *intel_dp)
2979{
2980	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
2981	enum port port = dp_to_dig_port(intel_dp)->port;
2982
2983	if (IS_BROXTON(dev_priv))
2984		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2985	else if (INTEL_GEN(dev_priv) >= 9) {
2986		if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
2987			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2988		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2989	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2990		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2991	else if (IS_GEN7(dev_priv) && port == PORT_A)
2992		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2993	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
2994		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2995	else
2996		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2997}
2998
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, per platform. Higher swing generally leaves less headroom for
 * pre-emphasis, hence the inverse mapping in each table.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_GEN(dev_priv) >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* Same table as HSW/BDW above. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3066
/*
 * Translate the DP training values (swing + pre-emphasis) from
 * train_set[0] into VLV DPIO PHY register values and program them via
 * vlv_set_phy_signal_level(). The register values are opaque magic
 * constants; only the swing/pre-emphasis combinations listed in the
 * tables are valid, anything else returns early. The function's return
 * value is always 0 (no port register bits for VLV; the PHY does it all).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3152
 
 
 
 
 
 
/*
 * CHV counterpart of vlv_signal_levels(): translate train_set[0] into
 * CHV PHY de-emphasis/margin values and program them via
 * chv_set_phy_signal_level(). The maximum swing + no pre-emphasis case
 * additionally enables the unique transition scale. Returns 0 always;
 * no port register bits are needed on CHV.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* Max swing needs the unique transition scale. */
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3235
/*
 * Translate DP training values (swing + pre-emphasis) from train_set
 * into the g4x-era DP port register voltage/pre-emphasis bits. The
 * returned bits are OR'ed into the port register by the caller.
 */
static uint32_t
gen4_signal_levels(uint8_t train_set)
{
	uint32_t	signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}
3273
3274/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	/* Combined swing+pre-emphasis selects one SNB register value. */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Fall back to the lowest setting for unknown combinations. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3301
3302/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	/* Combined swing+pre-emphasis selects one IVB register value. */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* Fall back to a safe middle setting for unknown combinations. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3332
3333void
3334intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3335{
3336	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3337	enum port port = intel_dig_port->port;
3338	struct drm_device *dev = intel_dig_port->base.base.dev;
3339	struct drm_i915_private *dev_priv = to_i915(dev);
3340	uint32_t signal_levels, mask = 0;
3341	uint8_t train_set = intel_dp->train_set[0];
3342
3343	if (HAS_DDI(dev_priv)) {
3344		signal_levels = ddi_signal_levels(intel_dp);
3345
3346		if (IS_BROXTON(dev_priv))
3347			signal_levels = 0;
3348		else
3349			mask = DDI_BUF_EMP_MASK;
3350	} else if (IS_CHERRYVIEW(dev_priv)) {
3351		signal_levels = chv_signal_levels(intel_dp);
3352	} else if (IS_VALLEYVIEW(dev_priv)) {
3353		signal_levels = vlv_signal_levels(intel_dp);
3354	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
3355		signal_levels = gen7_edp_signal_levels(train_set);
3356		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3357	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
3358		signal_levels = gen6_edp_signal_levels(train_set);
3359		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3360	} else {
3361		signal_levels = gen4_signal_levels(train_set);
3362		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3363	}
3364
3365	if (mask)
3366		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3367
3368	DRM_DEBUG_KMS("Using vswing level %d\n",
3369		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3370	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3371		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3372			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3373
3374	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3375
3376	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3377	POSTING_READ(intel_dp->output_reg);
3378}
3379
3380void
3381intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3382				       uint8_t dp_train_pat)
3383{
3384	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3385	struct drm_i915_private *dev_priv =
3386		to_i915(intel_dig_port->base.base.dev);
3387
3388	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3389
3390	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3391	POSTING_READ(intel_dp->output_reg);
3392}
3393
3394void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3395{
3396	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3397	struct drm_device *dev = intel_dig_port->base.base.dev;
3398	struct drm_i915_private *dev_priv = to_i915(dev);
3399	enum port port = intel_dig_port->port;
3400	uint32_t val;
3401
3402	if (!HAS_DDI(dev_priv))
3403		return;
3404
3405	val = I915_READ(DP_TP_CTL(port));
3406	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3407	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3408	I915_WRITE(DP_TP_CTL(port), val);
3409
3410	/*
3411	 * On PORT_A we can have only eDP in SST mode. There the only reason
3412	 * we need to set idle transmission mode is to work around a HW issue
3413	 * where we enable the pipe while not in idle link-training mode.
3414	 * In this case there is requirement to wait for a minimum number of
3415	 * idle patterns to be sent.
3416	 */
3417	if (port == PORT_A)
3418		return;
3419
3420	if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3421				    DP_TP_STATUS_IDLE_DONE,
3422				    DP_TP_STATUS_IDLE_DONE,
3423				    1))
3424		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3425}
3426
/*
 * Disable a non-DDI DP port: first switch the link to the idle pattern,
 * then clear the port enable and audio bits, then apply the IBX
 * transcoder-A workaround if needed, and finally wait out the panel
 * power down delay. The cached intel_dp->DP is updated to match the
 * final register value.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down elsewhere. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	/* The port should still be enabled when we get here. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Select the platform's idle training pattern encoding. */
	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		/* Let pipe A settle before re-enabling underrun reporting. */
		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3495
3496bool
3497intel_dp_read_dpcd(struct intel_dp *intel_dp)
3498{
3499	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3500			     sizeof(intel_dp->dpcd)) < 0)
 
 
 
 
 
3501		return false; /* aux transfer failed */
3502
3503	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3504
3505	return intel_dp->dpcd[DP_DPCD_REV] != 0;
3506}
3507
/*
 * One-time eDP DPCD initialization: read the base DPCD, the sink
 * descriptor, the PSR capabilities, the eDP display control block, and
 * (for eDP 1.4+) the table of supported intermediate link rates.
 * Returns false if the base DPCD could not be read.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	intel_dp_read_desc(intel_dp);

	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/* Check if the panel supports PSR */
	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
			 intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));
	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	/* PSR2 is only considered on gen9+ sinks that advertise it. */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
		uint8_t frame_sync_cap;

		dev_priv->psr.sink_support = true;
		drm_dp_dpcd_read(&intel_dp->aux,
				 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
				 &frame_sync_cap, 1);
		dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
		/* PSR2 needs frame sync as well */
		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
		DRM_DEBUG_KMS("PSR2 %s on sink",
			      dev_priv->psr.psr2_support ? "supported" : "not supported");
	}

	/* Read the eDP Display control capabilities registers */
	if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/* Intermediate frequency support */
	if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	return true;
}
3580
3581
/*
 * (Re)read the sink's DPCD and sink count, and refresh the downstream
 * port info for branch devices. Returns false on AUX failure or when a
 * non-eDP dongle reports no attached display.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
			     &intel_dp->sink_count, 1) < 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3622
3623static bool
3624intel_dp_can_mst(struct intel_dp *intel_dp)
3625{
3626	u8 buf[1];
3627
3628	if (!i915.enable_dp_mst)
3629		return false;
3630
3631	if (!intel_dp->can_mst)
3632		return false;
3633
3634	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3635		return false;
3636
3637	if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
3638		return false;
3639
3640	return buf[0] & DP_MST_CAP;
3641}
3642
3643static void
3644intel_dp_configure_mst(struct intel_dp *intel_dp)
3645{
3646	if (!i915.enable_dp_mst)
3647		return;
3648
3649	if (!intel_dp->can_mst)
3650		return;
3651
3652	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
 
3653
3654	if (intel_dp->is_mst)
3655		DRM_DEBUG_KMS("Sink is MST capable\n");
3656	else
3657		DRM_DEBUG_KMS("Sink is not MST capable\n");
 
 
 
 
 
3658
3659	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3660					intel_dp->is_mst);
3661}
3662
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START in the sink's
 * DP_TEST_SINK register and poll (up to 10 vblanks) until the sink's
 * CRC count drops to zero. IPS is unconditionally re-enabled on every
 * exit path, pairing with the disable in intel_dp_sink_crc_start().
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the counter
 * never cleared.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait for the sink's CRC count to drain to zero. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
3706
/*
 * Start sink CRC calculation. Verifies the sink supports the CRC test,
 * stops any calculation already in progress, disables IPS (which would
 * perturb the CRC), and sets DP_TEST_SINK_START.
 * Returns 0 on success, -ENOTTY if unsupported, -EIO on AUX failures.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous run may have left CRC calculation enabled; stop it
	 * first so the counter is in a known state. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	/* IPS stays off until intel_dp_sink_crc_stop() re-enables it. */
	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Let one frame go by so the sink can latch the first CRC. */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return 0;
}
3741
/*
 * Read a 6-byte frame CRC from the sink (DP_TEST_CRC_R_CR..), used by the
 * debugfs CRC testing interface. Starts CRC calculation, waits up to 6
 * vblanks for the sink to produce a CRC, reads it into @crc, then stops
 * calculation again. Returns 0 on success or a negative error code.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a non-zero CRC count. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	/* 6 bytes: R/Cr, G/Y, B/Cb CRCs, 16 bits each. */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Best effort: the CRC (or the error in ret) is already decided. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
3782
3783static bool
3784intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3785{
3786	return drm_dp_dpcd_read(&intel_dp->aux,
3787				       DP_DEVICE_SERVICE_IRQ_VECTOR,
3788				       sink_irq_vector, 1) == 1;
3789}
3790
3791static bool
3792intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3793{
3794	int ret;
3795
3796	ret = drm_dp_dpcd_read(&intel_dp->aux,
3797					     DP_SINK_COUNT_ESI,
3798					     sink_irq_vector, 14);
3799	if (ret != 14)
3800		return false;
3801
3802	return true;
3803}
3804
3805static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3806{
3807	uint8_t test_result = DP_TEST_ACK;
3808	return test_result;
3809}
3810
3811static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3812{
3813	uint8_t test_result = DP_TEST_NAK;
3814	return test_result;
3815}
3816
3817static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
3818{
3819	uint8_t test_result = DP_TEST_NAK;
3820	struct intel_connector *intel_connector = intel_dp->attached_connector;
3821	struct drm_connector *connector = &intel_connector->base;
3822
3823	if (intel_connector->detect_edid == NULL ||
3824	    connector->edid_corrupt ||
3825	    intel_dp->aux.i2c_defer_count > 6) {
3826		/* Check EDID read for NACKs, DEFERs and corruption
3827		 * (DP CTS 1.2 Core r1.1)
3828		 *    4.2.2.4 : Failed EDID read, I2C_NAK
3829		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
3830		 *    4.2.2.6 : EDID corruption detected
3831		 * Use failsafe mode for all cases
3832		 */
3833		if (intel_dp->aux.i2c_nack_count > 0 ||
3834			intel_dp->aux.i2c_defer_count > 0)
3835			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
3836				      intel_dp->aux.i2c_nack_count,
3837				      intel_dp->aux.i2c_defer_count);
3838		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
3839	} else {
3840		struct edid *block = intel_connector->detect_edid;
3841
3842		/* We have to write the checksum
3843		 * of the last block read
3844		 */
3845		block += intel_connector->detect_edid->extensions;
3846
3847		if (!drm_dp_dpcd_write(&intel_dp->aux,
3848					DP_TEST_EDID_CHECKSUM,
3849					&block->checksum,
3850					1))
3851			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
3852
3853		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
3854		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
3855	}
3856
3857	/* Set test active flag here so userspace doesn't interrupt things */
3858	intel_dp->compliance_test_active = 1;
3859
3860	return test_result;
3861}
3862
3863static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3864{
3865	uint8_t test_result = DP_TEST_NAK;
3866	return test_result;
3867}
3868
/*
 * Service a DP compliance automated-test request: read DP_TEST_REQUEST,
 * dispatch to the per-test handler, and write the resulting ACK/NAK back
 * to DP_TEST_RESPONSE. The response is written even when the request read
 * fails, in which case it is the default DP_TEST_NAK.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
3914
/*
 * Service MST sink IRQs: read the ESI block, retrain on a bad channel EQ,
 * hand the ESI to the topology manager, ack the handled bits, and loop
 * while the sink keeps raising events. If the ESI read fails, the device
 * is assumed gone: tear down MST and send a hotplug event.
 * Returns the topology manager's result, or -EINVAL when not in MST mode
 * or when the sink stopped responding.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits (3 bytes at
				 * DP_SINK_COUNT_ESI+1); retry the write a
				 * few times in case of AUX flakiness. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile;
				 * re-read the ESI and go around again. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3971
/*
 * Retrain the DP link on the encoder's current CRTC, with CPU (and PCH,
 * where applicable) FIFO underrun reporting suppressed for the duration —
 * link retraining is expected to cause transient underruns.
 */
static void
intel_dp_retrain_link(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);
}
3996
/*
 * Check DPCD link status and retrain if channel EQ is no longer ok (or if
 * a link-training compliance test explicitly requested it). Caller must
 * hold connection_mutex. Bails out silently when the encoder has no
 * active CRTC or no trained link to check.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		DRM_ERROR("Failed to get link status\n");
		return;
	}

	/* Nothing to retrain without an active pipe driving this encoder. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* FIXME: we need to synchronize this sort of stuff with hardware
	 * readout. Currently fast link training doesn't work on boot-up. */
	if (!intel_dp->lane_count)
		return;

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);

		intel_dp_retrain_link(intel_dp);
	}
}
4031
4032/*
4033 * According to DP spec
4034 * 5.1.2:
4035 *  1. Read DPCD
4036 *  2. Configure link according to Receiver Capabilities
4037 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4038 *  4. Check link status on receipt of hot-plug interrupt
4039 *
4040 * intel_dp_short_pulse -  handles short pulse interrupts
4041 * when full detection is not required.
4042 * Returns %true if short pulse is handled and full detection
4043 * is NOT required and %false otherwise.
4044 */
4045static bool
4046intel_dp_short_pulse(struct intel_dp *intel_dp)
4047{
4048	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4049	u8 sink_irq_vector = 0;
4050	u8 old_sink_count = intel_dp->sink_count;
4051	bool ret;
 
 
4052
4053	/*
4054	 * Clearing compliance test variables to allow capturing
4055	 * of values for next automated test request.
4056	 */
4057	intel_dp->compliance_test_active = 0;
4058	intel_dp->compliance_test_type = 0;
4059	intel_dp->compliance_test_data = 0;
4060
4061	/*
4062	 * Now read the DPCD to see if it's actually running
4063	 * If the current value of sink count doesn't match with
4064	 * the value that was stored earlier or dpcd read failed
4065	 * we need to do full detection
4066	 */
4067	ret = intel_dp_get_dpcd(intel_dp);
 
 
 
4068
4069	if ((old_sink_count != intel_dp->sink_count) || !ret) {
4070		/* No need to proceed if we are going to do full detect */
4071		return false;
4072	}
4073
4074	/* Try to read the source of the interrupt */
4075	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4076	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4077	    sink_irq_vector != 0) {
4078		/* Clear interrupt source */
4079		drm_dp_dpcd_writeb(&intel_dp->aux,
4080				   DP_DEVICE_SERVICE_IRQ_VECTOR,
4081				   sink_irq_vector);
4082
4083		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4084			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4085		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4086			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4087	}
4088
4089	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4090	intel_dp_check_link_status(intel_dp);
4091	drm_modeset_unlock(&dev->mode_config.connection_mutex);
4092
4093	return true;
 
 
 
4094}
4095
4096/* XXX this is probably wrong for multiple downstream ports */
4097static enum drm_connector_status
4098intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4099{
4100	uint8_t *dpcd = intel_dp->dpcd;
4101	uint8_t type;
4102
4103	if (!intel_dp_get_dpcd(intel_dp))
4104		return connector_status_disconnected;
4105
4106	if (is_edp(intel_dp))
4107		return connector_status_connected;
4108
4109	/* if there's no downstream port, we're done */
4110	if (!drm_dp_is_branch(dpcd))
4111		return connector_status_connected;
4112
4113	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4114	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4115	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
 
4116
4117		return intel_dp->sink_count ?
4118		connector_status_connected : connector_status_disconnected;
 
 
 
 
4119	}
4120
4121	if (intel_dp_can_mst(intel_dp))
4122		return connector_status_connected;
4123
4124	/* If no HPD, poke DDC gently */
4125	if (drm_probe_ddc(&intel_dp->aux.ddc))
4126		return connector_status_connected;
4127
4128	/* Well we tried, say unknown for unreliable port types */
4129	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4130		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4131		if (type == DP_DS_PORT_TYPE_VGA ||
4132		    type == DP_DS_PORT_TYPE_NON_EDID)
4133			return connector_status_unknown;
4134	} else {
4135		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4136			DP_DWN_STRM_PORT_TYPE_MASK;
4137		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4138		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
4139			return connector_status_unknown;
4140	}
4141
4142	/* Anything else is out of spec, warn and ignore */
4143	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4144	return connector_status_disconnected;
4145}
4146
4147static enum drm_connector_status
4148edp_detect(struct intel_dp *intel_dp)
4149{
4150	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4151	enum drm_connector_status status;
4152
4153	status = intel_panel_detect(dev);
4154	if (status == connector_status_unknown)
4155		status = connector_status_connected;
4156
4157	return status;
4158}
4159
4160static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4161				       struct intel_digital_port *port)
4162{
4163	u32 bit;
4164
4165	switch (port->port) {
4166	case PORT_A:
4167		return true;
4168	case PORT_B:
4169		bit = SDE_PORTB_HOTPLUG;
4170		break;
4171	case PORT_C:
4172		bit = SDE_PORTC_HOTPLUG;
4173		break;
4174	case PORT_D:
4175		bit = SDE_PORTD_HOTPLUG;
4176		break;
4177	default:
4178		MISSING_CASE(port->port);
4179		return false;
4180	}
4181
4182	return I915_READ(SDEISR) & bit;
4183}
4184
4185static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4186				       struct intel_digital_port *port)
4187{
4188	u32 bit;
4189
4190	switch (port->port) {
4191	case PORT_A:
4192		return true;
4193	case PORT_B:
4194		bit = SDE_PORTB_HOTPLUG_CPT;
4195		break;
4196	case PORT_C:
4197		bit = SDE_PORTC_HOTPLUG_CPT;
4198		break;
4199	case PORT_D:
4200		bit = SDE_PORTD_HOTPLUG_CPT;
4201		break;
4202	case PORT_E:
4203		bit = SDE_PORTE_HOTPLUG_SPT;
4204		break;
4205	default:
4206		MISSING_CASE(port->port);
4207		return false;
4208	}
4209
4210	return I915_READ(SDEISR) & bit;
4211}
4212
4213static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4214				       struct intel_digital_port *port)
4215{
4216	u32 bit;
4217
4218	switch (port->port) {
4219	case PORT_B:
4220		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4221		break;
4222	case PORT_C:
4223		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4224		break;
4225	case PORT_D:
4226		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4227		break;
4228	default:
4229		MISSING_CASE(port->port);
4230		return false;
4231	}
4232
4233	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4234}
4235
4236static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4237					struct intel_digital_port *port)
4238{
4239	u32 bit;
4240
4241	switch (port->port) {
4242	case PORT_B:
4243		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4244		break;
4245	case PORT_C:
4246		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4247		break;
4248	case PORT_D:
4249		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4250		break;
4251	default:
4252		MISSING_CASE(port->port);
4253		return false;
4254	}
4255
4256	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4257}
4258
4259static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4260				       struct intel_digital_port *intel_dig_port)
4261{
4262	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4263	enum port port;
4264	u32 bit;
4265
4266	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4267	switch (port) {
4268	case PORT_A:
4269		bit = BXT_DE_PORT_HP_DDIA;
4270		break;
4271	case PORT_B:
4272		bit = BXT_DE_PORT_HP_DDIB;
4273		break;
4274	case PORT_C:
4275		bit = BXT_DE_PORT_HP_DDIC;
4276		break;
4277	default:
4278		MISSING_CASE(port);
4279		return false;
4280	}
4281
4282	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4283}
4284
4285/*
4286 * intel_digital_port_connected - is the specified port connected?
4287 * @dev_priv: i915 private structure
4288 * @port: the port to test
4289 *
4290 * Return %true if @port is connected, %false otherwise.
4291 */
4292bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4293				  struct intel_digital_port *port)
4294{
4295	if (HAS_PCH_IBX(dev_priv))
4296		return ibx_digital_port_connected(dev_priv, port);
4297	else if (HAS_PCH_SPLIT(dev_priv))
4298		return cpt_digital_port_connected(dev_priv, port);
4299	else if (IS_BROXTON(dev_priv))
4300		return bxt_digital_port_connected(dev_priv, port);
4301	else if (IS_GM45(dev_priv))
4302		return gm45_digital_port_connected(dev_priv, port);
4303	else
4304		return g4x_digital_port_connected(dev_priv, port);
4305}
4306
4307static struct edid *
4308intel_dp_get_edid(struct intel_dp *intel_dp)
4309{
4310	struct intel_connector *intel_connector = intel_dp->attached_connector;
4311
4312	/* use cached edid if we have one */
4313	if (intel_connector->edid) {
4314		/* invalid edid */
4315		if (IS_ERR(intel_connector->edid))
4316			return NULL;
4317
4318		return drm_edid_duplicate(intel_connector->edid);
4319	} else
4320		return drm_get_edid(&intel_connector->base,
4321				    &intel_dp->aux.ddc);
4322}
4323
4324static void
4325intel_dp_set_edid(struct intel_dp *intel_dp)
4326{
4327	struct intel_connector *intel_connector = intel_dp->attached_connector;
4328	struct edid *edid;
4329
4330	intel_dp_unset_edid(intel_dp);
4331	edid = intel_dp_get_edid(intel_dp);
4332	intel_connector->detect_edid = edid;
4333
4334	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4335		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4336	else
4337		intel_dp->has_audio = drm_detect_monitor_audio(edid);
4338}
4339
4340static void
4341intel_dp_unset_edid(struct intel_dp *intel_dp)
4342{
4343	struct intel_connector *intel_connector = intel_dp->attached_connector;
4344
4345	kfree(intel_connector->detect_edid);
4346	intel_connector->detect_edid = NULL;
4347
4348	intel_dp->has_audio = false;
4349}
4350
/*
 * Full connector detection, run on a long HPD pulse (or from .detect).
 * Probes live status and DPCD, tears down MST on disappearance, configures
 * MST, re-checks the link on an already-connected sink, reads the EDID and
 * services any pending sink IRQ / automated test request.
 * Holds the port's AUX power domain for the duration.
 */
static enum drm_connector_status
intel_dp_long_pulse(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	u8 sink_irq_vector = 0;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Reset compliance state so the next test starts clean. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DP;

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	intel_dp_print_rates(intel_dp);

	intel_dp_read_desc(intel_dp);

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	} else if (connector->status == connector_status_connected) {
		/*
		 * If display was connected already and is still connected
		 * check links status, there has been known issues of
		 * link loss triggerring long pulse!!!!
		 */
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		intel_dp_check_link_status(intel_dp);
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (is_edp(intel_dp) || intel_connector->detect_edid)
		status = connector_status_connected;
	/* Mark the full detect done so a following .detect can reuse it. */
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4460
/*
 * drm_connector_funcs .detect hook. If a recent long pulse already ran the
 * full detect (detect_done), the cached connector->status is returned
 * instead of probing again; the flag is consumed either way, so the next
 * call re-probes. @force is unused here.
 *
 * NOTE(review): when detect_done is set this returns connector->status
 * as-is — presumably the long pulse left it up to date; verify callers.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	enum drm_connector_status status = connector->status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	/* If full detect is not performed yet, do a full detect */
	if (!intel_dp->detect_done)
		status = intel_dp_long_pulse(intel_dp->attached_connector);

	intel_dp->detect_done = false;

	return status;
}
4478
/*
 * drm_connector_funcs .force hook: re-read the EDID for a connector whose
 * status is being forced, holding the AUX power domain around the read.
 * Does nothing beyond dropping the stale EDID if the connector is not
 * currently marked connected.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* The EDID read below goes over AUX/DDC; grab its power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DP;
}
4504
4505static int intel_dp_get_modes(struct drm_connector *connector)
4506{
4507	struct intel_connector *intel_connector = to_intel_connector(connector);
4508	struct edid *edid;
4509
4510	edid = intel_connector->detect_edid;
4511	if (edid) {
4512		int ret = intel_connector_update_modes(connector, edid);
4513		if (ret)
4514			return ret;
4515	}
4516
4517	/* if eDP has no EDID, fall back to fixed mode */
4518	if (is_edp(intel_attached_dp(connector)) &&
4519	    intel_connector->panel.fixed_mode) {
4520		struct drm_display_mode *mode;
4521
4522		mode = drm_mode_duplicate(connector->dev,
4523					  intel_connector->panel.fixed_mode);
4524		if (mode) {
4525			drm_mode_probed_add(connector, mode);
4526			return 1;
4527		}
4528	}
4529
4530	return 0;
4531}
4532
4533static bool
4534intel_dp_detect_audio(struct drm_connector *connector)
4535{
4536	bool has_audio = false;
4537	struct edid *edid;
4538
4539	edid = to_intel_connector(connector)->detect_edid;
4540	if (edid)
4541		has_audio = drm_detect_monitor_audio(edid);
4542
4543	return has_audio;
4544}
4545
/*
 * drm_connector_funcs .set_property hook. Handles the force-audio and
 * broadcast-RGB properties, plus the scaling-mode property on eDP. A
 * successful change that affects output triggers a modeset restore via
 * the "done" path; unchanged values return early without one.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value on the drm object first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio capability from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    val == DRM_MODE_SCALE_CENTER) {
			DRM_DEBUG_KMS("centering not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new property by restoring the mode on the live CRTC. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4638
4639static int
4640intel_dp_connector_register(struct drm_connector *connector)
4641{
4642	struct intel_dp *intel_dp = intel_attached_dp(connector);
4643	int ret;
4644
4645	ret = intel_connector_register(connector);
4646	if (ret)
4647		return ret;
4648
4649	i915_debugfs_connector_add(connector);
4650
4651	DRM_DEBUG_KMS("registering %s bus for %s\n",
4652		      intel_dp->aux.name, connector->kdev->kobj.name);
4653
4654	intel_dp->aux.dev = connector->kdev;
4655	return drm_dp_aux_register(&intel_dp->aux);
4656}
4657
4658static void
4659intel_dp_connector_unregister(struct drm_connector *connector)
4660{
4661	drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4662	intel_connector_unregister(connector);
4663}
4664
/*
 * drm_connector_funcs .destroy hook: free cached EDIDs, tear down the eDP
 * panel state if applicable, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* intel_connector->edid may hold an ERR_PTR sentinel for an invalid
	 * EDID; only free a real allocation. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4683
/*
 * Encoder .destroy hook: clean up MST state, make sure eDP panel VDD is
 * really off (the delayed-off work may still be pending), drop the reboot
 * notifier, tear down the AUX channel and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);

	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4711
/*
 * Encoder suspend hook: for eDP, cancel the delayed VDD-off work and force
 * panel VDD off synchronously under the PPS lock before suspending.
 * Non-eDP encoders need no work here.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4728
/*
 * Bring software VDD tracking in line with hardware state at boot/resume:
 * if the BIOS left panel VDD enabled, take the matching power-domain
 * reference and schedule a delayed VDD off. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4753
/*
 * Encoder .reset hook (boot/resume): refresh cached hw state, resume
 * an active LSPCON, and re-sanitize the eDP panel power sequencer in
 * case the BIOS touched it.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	/* Re-read the cached port register on non-DDI platforms. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (IS_GEN9(dev_priv) && lspcon->active)
		lspcon_resume(lspcon);

	/* The rest is eDP-only PPS sanitization. */
	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	pps_lock(intel_dp);

	/* Reinit the power sequencer, in case BIOS did something with it. */
	intel_dp_pps_init(encoder->dev, intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4778
/* Connector ops shared by all i915 DP/eDP connectors (atomic helpers). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4792
/* Probe helpers: mode enumeration and validation for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
};
4797
/* Encoder ops: .reset on init/resume, .destroy on teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4802
/*
 * Hotplug pulse handler for (e)DP ports, called from the HPD IRQ work.
 * Long pulses indicate connect/disconnect; short pulses are
 * sink-initiated events (link status, MST sideband, etc.). Returning
 * IRQ_NONE with detect_done cleared asks the caller to run a full
 * detect cycle.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Reset a possibly stale encoder type back to plain DP. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DP;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* Long pulse: let the caller do a full detect. */
	if (long_hpd) {
		intel_dp->detect_done = false;
		return IRQ_NONE;
	}

	/* The AUX transfers below need the port's AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			/* Force a full detect from the caller. */
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		/* Short pulse handling failed: request a full detect. */
		if (!intel_dp_short_pulse(intel_dp)) {
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
4871
4872/* check the VBT to see whether the eDP is on another port */
4873bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
4874{
 
 
 
 
 
 
 
 
 
 
4875	/*
4876	 * eDP not supported on g4x. so bail out early just
4877	 * for a bit extra safety in case the VBT is bonkers.
4878	 */
4879	if (INTEL_GEN(dev_priv) < 5)
4880		return false;
4881
4882	if (port == PORT_A)
4883		return true;
4884
4885	return intel_bios_is_port_edp(dev_priv, port);
 
 
 
 
 
 
 
 
 
 
 
4886}
4887
4888void
4889intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4890{
4891	struct intel_connector *intel_connector = to_intel_connector(connector);
4892
4893	intel_attach_force_audio_property(connector);
4894	intel_attach_broadcast_rgb_property(connector);
4895	intel_dp->color_range_auto = true;
4896
4897	if (is_edp(intel_dp)) {
4898		drm_mode_create_scaling_mode_property(connector->dev);
4899		drm_object_attach_property(
4900			&connector->base,
4901			connector->dev->mode_config.scaling_mode_property,
4902			DRM_MODE_SCALE_ASPECT);
4903		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4904	}
4905}
4906
/*
 * Seed the PPS bookkeeping timestamps with "now" so the panel power /
 * backlight sequencing delays are measured from driver init onwards
 * (presumably consumed by the wait helpers — see pps users above).
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
4913
/*
 * Read the panel power sequencer delays out of the PPS registers into
 * @seq. All values end up in the hw's 100us units, including the BXT
 * power cycle delay, which is converted from its native encoding.
 */
static void
intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
			   struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	struct pps_registers regs;

	intel_pps_get_registers(dev_priv, intel_dp, &regs);

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);
	/* BXT has no separate PP_DIVISOR register. */
	if (!IS_BROXTON(dev_priv)) {
		I915_WRITE(regs.pp_ctrl, pp_ctl);
		pp_div = I915_READ(regs.pp_div);
	}

	/* Pull timing values out of registers */
	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		     PANEL_POWER_UP_DELAY_SHIFT;

	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		  PANEL_LIGHT_ON_DELAY_SHIFT;

	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		  PANEL_LIGHT_OFF_DELAY_SHIFT;

	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		   PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev_priv)) {
		/*
		 * BXT keeps the power cycle delay in PP_CONTROL, in 100ms
		 * units and zero based; convert to the common 100us units.
		 */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			seq->t11_t12 = (tmp - 1) * 1000;
		else
			seq->t11_t12 = 0;
	} else {
		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}
}
4959
/* Log one PPS delay set (100us units) under the given label. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
4967
4968static void
4969intel_pps_verify_state(struct drm_i915_private *dev_priv,
4970		       struct intel_dp *intel_dp)
4971{
4972	struct edp_power_seq hw;
4973	struct edp_power_seq *sw = &intel_dp->pps_delays;
4974
4975	intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
4976
4977	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
4978	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
4979		DRM_ERROR("PPS state mismatch\n");
4980		intel_pps_dump_state("sw", sw);
4981		intel_pps_dump_state("hw", &hw);
4982	}
4983}
4984
/*
 * Compute the final panel power sequencing delays from the max of the
 * current hardware state and the VBT, falling back to the eDP spec
 * limits when both are zero, and cache them in intel_dp->pps_delays.
 * Idempotent: runs only once, keyed off t11_t12 which ends up non-zero
 * (the spec fallback guarantees that).
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert the 100us hw units to ms for the wait helpers. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;
}
5056
/*
 * Program the PPS registers from the cached pps_delays: the on/off
 * delays, the pp clock divisor (or BXT's in-PP_CONTROL power cycle
 * field), and the panel port select bits where the hw has them.
 * @force_disable_vdd: clear a BIOS-enabled VDD bit first so power
 * domain tracking stays consistent.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(dev_priv, intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	/* Pack the cached delays into the register layouts. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev_priv)) {
		/* BXT: power cycle delay lives in PP_CONTROL instead. */
		pp_div = I915_READ(regs.pp_ctrl);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);
	if (IS_BROXTON(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_div);
	else
		I915_WRITE(regs.pp_div, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      IS_BROXTON(dev_priv) ?
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(regs.pp_div));
}
5142
5143static void intel_dp_pps_init(struct drm_device *dev,
5144			      struct intel_dp *intel_dp)
5145{
5146	struct drm_i915_private *dev_priv = to_i915(dev);
5147
5148	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5149		vlv_initial_power_sequencer_setup(intel_dp);
5150	} else {
5151		intel_dp_init_panel_power_sequencer(dev, intel_dp);
5152		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5153	}
5154}
5155
5156/**
5157 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5158 * @dev_priv: i915 device
5159 * @crtc_state: a pointer to the active intel_crtc_state
5160 * @refresh_rate: RR to be programmed
5161 *
5162 * This function gets called when refresh rate (RR) has to be changed from
5163 * one frequency to another. Switches can be between high and low RR
5164 * supported by the panel or to any other RR based on media playback (in
5165 * this case, RR value needs to be passed from user space).
5166 *
5167 * The caller of this function needs to take a lock on dev_priv->drrs.
5168 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	/* NOTE(review): this initial value is dead — intel_crtc is
	 * re-derived from the encoder further down. */
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requested rate matching the downclock mode means low RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->base.active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		/* Gen8+ (except CHV): switch RR via the M/N divider sets. */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		/* Gen7/CHV: toggle the RR mode switch bit in PIPECONF. */
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5258
5259/**
5260 * intel_edp_drrs_enable - init drrs struct if supported
5261 * @intel_dp: DP struct
5262 * @crtc_state: A pointer to the active crtc state.
5263 *
5264 * Initializes frontbuffer_bits and drrs.dp
5265 */
5266void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5267			   struct intel_crtc_state *crtc_state)
5268{
5269	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5270	struct drm_i915_private *dev_priv = to_i915(dev);
 
 
 
5271
5272	if (!crtc_state->has_drrs) {
5273		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5274		return;
5275	}
5276
5277	mutex_lock(&dev_priv->drrs.mutex);
5278	if (WARN_ON(dev_priv->drrs.dp)) {
5279		DRM_ERROR("DRRS already enabled\n");
5280		goto unlock;
5281	}
5282
5283	dev_priv->drrs.busy_frontbuffer_bits = 0;
5284
5285	dev_priv->drrs.dp = intel_dp;
5286
5287unlock:
5288	mutex_unlock(&dev_priv->drrs.mutex);
5289}
5290
5291/**
5292 * intel_edp_drrs_disable - Disable DRRS
5293 * @intel_dp: DP struct
5294 * @old_crtc_state: Pointer to old crtc_state.
5295 *
5296 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Force back to the high refresh rate before tearing down. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/*
	 * Cancel outside the mutex: the downclock work itself takes
	 * drrs.mutex, so a sync cancel under it could deadlock.
	 */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5321
/*
 * Idleness timer body: after the 1s timeout with no frontbuffer
 * activity, switch the panel down to the low refresh rate.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since the work was scheduled. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5353
5354/**
5355 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5356 * @dev_priv: i915 device
5357 * @frontbuffer_bits: frontbuffer plane tracking bits
5358 *
5359 * This function gets called every time rendering on the given planes starts.
5360 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5361 *
5362 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5363 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Activity resets the idleness timer. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Track only the bits belonging to the DRRS panel's pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5394
5395/**
5396 * intel_edp_drrs_flush - Restart Idleness DRRS
5397 * @dev_priv: i915 device
5398 * @frontbuffer_bits: frontbuffer plane tracking bits
5399 *
5400 * This function gets called every time rendering on the given planes has
5401 * completed or flip on a crtc is completed. So DRRS should be upclocked
5402 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5403 * if no other planes are dirty.
5404 *
5405 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5406 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Activity resets the idleness timer. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Clear the flushed bits for the DRRS panel's pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5444
5445/**
5446 * DOC: Display Refresh Rate Switching (DRRS)
5447 *
5448 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5449 * which enables switching between low and high refresh rates,
5450 * dynamically, based on the usage scenario. This feature is applicable
5451 * for internal panels.
5452 *
5453 * Indication that the panel supports DRRS is given by the panel EDID, which
5454 * would list multiple refresh rates for one resolution.
5455 *
5456 * DRRS is of 2 types - static and seamless.
5457 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5458 * (may appear as a blink on screen) and is used in dock-undock scenario.
5459 * Seamless DRRS involves changing RR without any visual effect to the user
5460 * and can be used during normal system usage. This is done by programming
5461 * certain registers.
5462 *
5463 * Support for static/seamless DRRS may be indicated in the VBT based on
5464 * inputs from the panel spec.
5465 *
5466 * DRRS saves power by switching to low RR based on usage scenarios.
5467 *
5468 * The implementation is based on frontbuffer tracking implementation.  When
5469 * there is a disturbance on the screen triggered by user activity or a periodic
5470 * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5471 * no movement on screen, after a timeout of 1 second, a switch to low RR is
5472 * made.
5473 *
5474 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5475 * and intel_edp_drrs_flush() are called.
5476 *
5477 * DRRS can be further extended to support other internal panels and also
5478 * the scenario of video playback wherein RR is set based on the rate
5479 * requested by userspace.
5480 */
5481
5482/**
5483 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5484 * @intel_connector: eDP connector
5485 * @fixed_mode: preferred mode of panel
5486 *
5487 * This function is  called only once at driver load to initialize basic
5488 * DRRS stuff.
5489 *
5490 * Returns:
5491 * Downclock mode if panel supports it, else return NULL.
5492 * DRRS support is determined by the presence of downclock mode (apart
5493 * from VBT setting).
5494 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are set up even when DRRS ends up unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* A lower-vrefresh EDID mode for the same resolution, if any. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5531
/*
 * eDP-specific connector setup: sanitize the panel power sequencer,
 * cache DPCD and EDID for the fixed panel, pick fixed/downclock modes
 * (with VBT fallback), register the VLV/CHV reboot notifier, and
 * initialize panel/backlight state. Returns true on success or for
 * non-eDP ports (nothing to do); false means the panel is unusable and
 * the caller should not register an eDP connector.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	pps_lock(intel_dp);

	intel_dp_init_panel_power_timestamps(intel_dp);
	intel_dp_pps_init(dev, intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but yielded no modes: mark invalid. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode) {
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev_priv))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);

	return false;
}
5660
/*
 * intel_dp_init_connector - second-stage init of a DP/eDP connector
 * @intel_dig_port: digital port the connector hangs off
 * @intel_connector: pre-allocated connector to initialise and attach
 *
 * Selects the per-platform AUX vfuncs, registers the DRM connector,
 * sets up the hotplug pin, initialises DP MST where the platform and
 * port support it, and finishes eDP-specific setup for eDP ports.
 *
 * Returns true on success.  On failure the DRM connector is cleaned
 * up and false is returned; freeing @intel_connector/@intel_dig_port
 * remains the caller's responsibility (see intel_dp_init()).
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;
	int type;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	/* No panel power sequencer pipe claimed yet. */
	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs: per-platform AUX clock divider selection. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* AUX_CTL value generation differs on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	/* DDI platforms need a pre-retrain hook for link retraining. */
	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_dp_aux_init(intel_dp);

	/* Deferred VDD off; flushed/cancelled on teardown paths. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A stepping: port B uses the port A hotplug pin. */
		if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		/* Callers only pass ports A-E; anything else is a driver bug. */
		BUG();
	}

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* On failure, undo the AUX and MST setup done above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
5795
/*
 * intel_dp_init - create and register a DP encoder/connector pair
 * @dev: drm device
 * @output_reg: DP port control register for this port
 * @port: port to initialise
 *
 * Allocates the digital port and connector, installs the
 * platform-specific encoder enable/disable hooks (chv vs. vlv vs.
 * g4x/ilk+), registers the encoder, and hands off to
 * intel_dp_init_connector() for the connector-side setup.
 *
 * Returns true on success, false on any allocation or init failure;
 * all partially constructed state is released via the goto unwind.
 */
bool intel_dp_init(struct drm_device *dev,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	/* NOTE(review): encoder registered with the TMDS type — confirm intentional. */
	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Enable/disable sequencing differs per platform family. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_GEN(dev_priv) >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	if (IS_CHERRYVIEW(dev_priv)) {
		/* CHV: port D drives only the third pipe (bit 2). */
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	/* Route long/short HPD pulses for this port to the DP handler. */
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

	/* Unwind in reverse order of construction. */
err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
5876
5877void intel_dp_mst_suspend(struct drm_device *dev)
5878{
5879	struct drm_i915_private *dev_priv = to_i915(dev);
5880	int i;
5881
5882	/* disable MST */
5883	for (i = 0; i < I915_MAX_PORTS; i++) {
5884		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5885
5886		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5887			continue;
5888
5889		if (intel_dig_port->dp.is_mst)
5890			drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
 
 
 
 
5891	}
5892}
5893
5894void intel_dp_mst_resume(struct drm_device *dev)
5895{
5896	struct drm_i915_private *dev_priv = to_i915(dev);
5897	int i;
5898
5899	for (i = 0; i < I915_MAX_PORTS; i++) {
5900		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5901		int ret;
 
 
 
5902
5903		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5904			continue;
5905
5906		ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5907		if (ret)
5908			intel_dp_check_mst_status(&intel_dig_port->dp);
 
 
5909	}
5910}