Linux Audio

Check our new training course

Loading...
v3.1
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/i2c.h>
  29#include <linux/slab.h>
  30#include "drmP.h"
  31#include "drm.h"
  32#include "drm_crtc.h"
  33#include "drm_crtc_helper.h"
 
 
 
 
 
 
 
 
  34#include "intel_drv.h"
  35#include "i915_drm.h"
  36#include "i915_drv.h"
  37#include "drm_dp_helper.h"
  38
  39
  40#define DP_LINK_STATUS_SIZE	6
  41#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
  42
  43#define DP_LINK_CONFIGURATION_SIZE	9
 
 
 
 
 
 
 
 
 
  44
/* Per-port state for a DisplayPort (or eDP) output. */
struct intel_dp {
	struct intel_encoder base;		/* common encoder state; must be first for container_of */
	uint32_t output_reg;			/* MMIO offset of this port's DP control register */
	uint32_t DP;				/* cached value built up for the port control register */
	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];	/* DPCD link-config bytes written at mode set */
	bool has_audio;				/* sink reports audio capability */
	int force_audio;			/* user override for audio (property-driven) — TODO confirm encoding */
	uint32_t color_range;			/* extra color-range bit OR'd into the DP register */
	int dpms_mode;				/* last DPMS state applied */
	uint8_t link_bw;			/* chosen link rate (DP_LINK_BW_1_62 / DP_LINK_BW_2_7) */
	uint8_t lane_count;			/* chosen lane count (1, 2 or 4) */
	uint8_t dpcd[8];			/* cached first bytes of the sink's DPCD */
	struct i2c_adapter adapter;		/* I2C-over-AUX adapter for DDC/EDID */
	struct i2c_algo_dp_aux_data algo;	/* algo data backing @adapter */
	bool is_pch_edp;			/* eDP port located on the PCH rather than the CPU */
	uint8_t	train_set[4];			/* per-lane training values — presumably last written set; used by link training */
	uint8_t link_status[DP_LINK_STATUS_SIZE];	/* last DPCD link status readback */
};
  63
  64/**
  65 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  66 * @intel_dp: DP struct
  67 *
  68 * If a CPU or PCH DP output is attached to an eDP panel, this function
  69 * will return true, and false otherwise.
  70 */
  71static bool is_edp(struct intel_dp *intel_dp)
  72{
  73	return intel_dp->base.type == INTEL_OUTPUT_EDP;
 
 
  74}
  75
  76/**
  77 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
  78 * @intel_dp: DP struct
  79 *
  80 * Returns true if the given DP struct corresponds to a PCH DP port attached
  81 * to an eDP panel, false otherwise.  Helpful for determining whether we
  82 * may need FDI resources for a given DP output or not.
  83 */
  84static bool is_pch_edp(struct intel_dp *intel_dp)
  85{
  86	return intel_dp->is_pch_edp;
 
 
  87}
  88
/* Map a drm_encoder back to its containing intel_dp via the embedded base. */
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_dp, base.base);
}
  93
/* Map a connector to the intel_dp of the encoder attached to it. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return container_of(intel_attached_encoder(connector),
			    struct intel_dp, base);
}
  99
 100/**
 101 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 102 * @encoder: DRM encoder
 103 *
 104 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
 105 * by intel_display.c.
 106 */
 107bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
 108{
 109	struct intel_dp *intel_dp;
 
 110
 111	if (!encoder)
 112		return false;
 
 
 
 
 113
 114	intel_dp = enc_to_intel_dp(encoder);
 
 115
 116	return is_pch_edp(intel_dp);
 
 
 117}
 118
 119static void intel_dp_start_link_train(struct intel_dp *intel_dp);
 120static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 121static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 
 
 122
 123void
 124intel_edp_link_config (struct intel_encoder *intel_encoder,
 125		       int *lane_num, int *link_bw)
 126{
 127	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
 
 
 
 128
 129	*lane_num = intel_dp->lane_count;
 130	if (intel_dp->link_bw == DP_LINK_BW_1_62)
 131		*link_bw = 162000;
 132	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
 133		*link_bw = 270000;
 134}
 135
 136static int
 137intel_dp_max_lane_count(struct intel_dp *intel_dp)
 138{
 139	int max_lane_count = 4;
 
 
 
 
 140
 141	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
 142		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
 143		switch (max_lane_count) {
 144		case 1: case 2: case 4:
 145			break;
 146		default:
 147			max_lane_count = 4;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 148		}
 149	}
 150	return max_lane_count;
 151}
 152
 153static int
 154intel_dp_max_link_bw(struct intel_dp *intel_dp)
 155{
 156	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 157
 158	switch (max_link_bw) {
 159	case DP_LINK_BW_1_62:
 160	case DP_LINK_BW_2_7:
 161		break;
 162	default:
 163		max_link_bw = DP_LINK_BW_1_62;
 164		break;
 165	}
 166	return max_link_bw;
 167}
 168
 169static int
 170intel_dp_link_clock(uint8_t link_bw)
 171{
 172	if (link_bw == DP_LINK_BW_2_7)
 173		return 270000;
 174	else
 175		return 162000;
 
 
 
 
 
 
 
 
 
 176}
 177
 178/* I think this is a fiction */
 179static int
 180intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
 181{
 182	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 183	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 184	int bpp = 24;
 
 
 
 
 
 185
 186	if (intel_crtc)
 187		bpp = intel_crtc->bpp;
 
 188
 189	return (pixel_clock * bpp + 7) / 8;
 190}
 191
 192static int
 193intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 194{
 195	return (max_link_clock * max_lanes * 8) / 10;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 196}
 197
 198static int
 199intel_dp_mode_valid(struct drm_connector *connector,
 200		    struct drm_display_mode *mode)
 201{
 202	struct intel_dp *intel_dp = intel_attached_dp(connector);
 203	struct drm_device *dev = connector->dev;
 204	struct drm_i915_private *dev_priv = dev->dev_private;
 205	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
 206	int max_lanes = intel_dp_max_lane_count(intel_dp);
 
 
 
 207
 208	if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
 209		if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
 210			return MODE_PANEL;
 211
 212		if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
 213			return MODE_PANEL;
 
 
 214	}
 215
 216	/* only refuse the mode on non eDP since we have seen some weird eDP panels
 217	   which are outside spec tolerances but somehow work by magic */
 218	if (!is_edp(intel_dp) &&
 219	    (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
 220	     > intel_dp_max_data_rate(max_link_clock, max_lanes)))
 
 
 221		return MODE_CLOCK_HIGH;
 222
 223	if (mode->clock < 10000)
 224		return MODE_CLOCK_LOW;
 225
 
 
 
 226	return MODE_OK;
 227}
 228
 229static uint32_t
 230pack_aux(uint8_t *src, int src_bytes)
 231{
 232	int	i;
 233	uint32_t v = 0;
 234
 235	if (src_bytes > 4)
 236		src_bytes = 4;
 237	for (i = 0; i < src_bytes; i++)
 238		v |= ((uint32_t) src[i]) << ((3-i) * 8);
 239	return v;
 240}
 241
 242static void
 243unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
 244{
 245	int i;
 246	if (dst_bytes > 4)
 247		dst_bytes = 4;
 248	for (i = 0; i < dst_bytes; i++)
 249		dst[i] = src >> ((3-i) * 8);
 250}
 251
/* hrawclock is 1/4 the FSB frequency */
/*
 * Decode the FSB frequency field of the CLKCFG register into the
 * hrawclk rate in MHz; unknown encodings fall back to 133.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
 281
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Perform one raw transaction on this port's DP AUX channel.
 *
 * @send/@send_bytes: request message to transmit.
 * @recv/@recv_size: buffer for the reply; excess reply bytes are dropped.
 *
 * Returns the number of reply bytes received, or a negative errno:
 * -EBUSY if the channel never went idle or the send never completed,
 * -EIO on a receive error, -ETIMEDOUT when the sink did not respond.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* AUX control/data registers sit at fixed offsets from the port reg */
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
		if (IS_GEN6(dev))
			aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		/* busy-wait for SEND_BUSY to clear */
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}
 
 400
/* Write data to the aux channel in native mode */
/*
 * Native AUX write of up to 16 bytes to DPCD @address.
 * Returns @send_bytes on ACK, -1 if the payload is too large, or a
 * negative errno from the channel.  NOTE(review): a sink that replies
 * DEFER forever is retried indefinitely here — no retry cap.
 */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t	msg[20];	/* 4-byte header + up to 16 payload bytes */
	int msg_bytes;
	uint8_t	ack;

	if (send_bytes > 16)
		return -1;
	/* native AUX write request header: command, address, length-1 */
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);	/* sink busy; retry after a short delay */
		else
			return -EIO;
	}
	return send_bytes;
}
 432
/* Write a single byte to the aux channel in native mode */
/* Convenience wrapper: native AUX write of one byte to DPCD @address. */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}
 440
 441/* read bytes from a native aux channel */
 442static int
 443intel_dp_aux_native_read(struct intel_dp *intel_dp,
 444			 uint16_t address, uint8_t *recv, int recv_bytes)
 445{
 446	uint8_t msg[4];
 447	int msg_bytes;
 448	uint8_t reply[20];
 449	int reply_bytes;
 450	uint8_t ack;
 451	int ret;
 452
 453	msg[0] = AUX_NATIVE_READ << 4;
 454	msg[1] = address >> 8;
 455	msg[2] = address & 0xff;
 456	msg[3] = recv_bytes - 1;
 457
 458	msg_bytes = 4;
 459	reply_bytes = recv_bytes + 1;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 460
 461	for (;;) {
 462		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
 463				      reply, reply_bytes);
 464		if (ret == 0)
 465			return -EPROTO;
 466		if (ret < 0)
 467			return ret;
 468		ack = reply[0];
 469		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
 470			memcpy(recv, reply + 1, ret - 1);
 471			return ret - 1;
 
 
 
 
 
 
 
 
 
 472		}
 473		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
 474			udelay(100);
 475		else
 476			return -EIO;
 
 477	}
 
 
 478}
 479
/*
 * I2C-over-AUX transfer callback used by the i2c_algo_dp_aux adapter.
 *
 * @mode: MODE_I2C_{READ,WRITE} plus MODE_I2C_STOP flags from the algo layer.
 * @write_byte: data byte for a write transfer.
 * @read_byte: out-parameter filled in for a read transfer.
 *
 * Returns the number of data bytes transferred (0 or 1) on success, or a
 * negative errno; gives up after 5 native-DEFER retries.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) keeps the I2C transaction open */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;	/* reply code + data byte */
		break;
	default:
		/* address-only transaction (start/stop) */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		/* first check the native (AUX-level) reply field */
		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		/* then the I2C-level reply field */
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}
 575
/*
 * Register an I2C-over-AUX adapter for this DP port so DDC/EDID traffic
 * can be tunnelled over the AUX channel.  Returns the i2c core's result.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	/* copy the name, guaranteeing NUL termination */
	strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	return i2c_dp_aux_add_bus(&intel_dp->adapter);
}
 595
/*
 * Choose a link bandwidth / lane count combination able to carry @mode,
 * storing the choice in @intel_dp and setting @adjusted_mode->clock to the
 * resulting link clock.  Walks lane counts and rates cheapest-first.
 * Returns false only for non-eDP when no combination has enough bandwidth;
 * eDP always succeeds (forced to the maximum configuration).
 */
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int lane_count, clock;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
		intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
					mode, adjusted_mode);
		/*
		 * the mode->clock is used to calculate the Data&Link M/N
		 * of the pipe. For the eDP the fixed clock should be used.
		 */
		mode->clock = dev_priv->panel_fixed_mode->clock;
	}

	/* try lane counts 1, 2, 4 and both link rates, lowest cost first */
	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
		for (clock = 0; clock <= max_clock; clock++) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
					<= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("Display port link bw %02x lane "
						"count %d clock %d\n",
				       intel_dp->link_bw, intel_dp->lane_count,
				       adjusted_mode->clock);
				return true;
			}
		}
	}

	if (is_edp(intel_dp)) {
		/* okay we failed just pick the highest */
		intel_dp->lane_count = max_lane_count;
		intel_dp->link_bw = bws[max_clock];
		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
			      "count %d clock %d\n",
			      intel_dp->link_bw, intel_dp->lane_count,
			      adjusted_mode->clock);

		return true;
	}

	return false;
}
 652
/* M/N divider values programmed into the pipe's data and link registers. */
struct intel_dp_m_n {
	uint32_t	tu;		/* transfer unit size (register takes tu - 1) */
	uint32_t	gmch_m;		/* data M/N: pixel bandwidth ratio */
	uint32_t	gmch_n;
	uint32_t	link_m;		/* link M/N: pixel clock vs link clock */
	uint32_t	link_n;
};
 660
 661static void
 662intel_reduce_ratio(uint32_t *num, uint32_t *den)
 663{
 664	while (*num > 0xffffff || *den > 0xffffff) {
 665		*num >>= 1;
 666		*den >>= 1;
 
 
 
 
 
 
 
 
 
 
 
 
 667	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 668}
 669
 670static void
 671intel_dp_compute_m_n(int bpp,
 672		     int nlanes,
 673		     int pixel_clock,
 674		     int link_clock,
 675		     struct intel_dp_m_n *m_n)
 676{
 677	m_n->tu = 64;
 678	m_n->gmch_m = (pixel_clock * bpp) >> 3;
 679	m_n->gmch_n = link_clock * nlanes;
 680	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
 681	m_n->link_m = pixel_clock;
 682	m_n->link_n = link_clock;
 683	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
 684}
 685
/*
 * Program the pipe's data and link M/N registers for a DP output on
 * @crtc, using the lane count found on the encoder driving this crtc.
 */
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;	/* fallback if no DP encoder is found */
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
		struct intel_dp *intel_dp;

		if (encoder->crtc != crtc)
			continue;

		intel_dp = enc_to_intel_dp(encoder);
		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
			lane_count = intel_dp->lane_count;
			break;
		} else if (is_edp(intel_dp)) {
			lane_count = dev_priv->edp.lanes;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	/* PCH platforms use the transcoder registers, others the pipe's */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe),
			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
			   m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
			   m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}
 742
/*
 * Assemble the DP port control value (intel_dp->DP) and the DPCD link
 * configuration block for the chosen mode; nothing is written to hardware
 * here beyond caching the values for the enable path.
 */
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= intel_dp->color_range;

	/* sync polarities from the adjusted mode */
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		intel_dp->DP |= DP_SYNC_HS_HIGH;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		intel_dp->DP |= DP_SYNC_VS_HIGH;

	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	else
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* cache the DPCD link-configuration block written during training */
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;

	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
		intel_dp->DP |= DP_ENHANCED_FRAMING;
	}

	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
		intel_dp->DP |= DP_PIPEB_SELECT;

	if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
		/* don't miss out required setting for eDP */
		intel_dp->DP |= DP_PLL_ENABLE;
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}
}
 806
/* Force the eDP AUX VDD rail on so DPCD/EDID reads work with the panel off. */
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	/*
	 * If the panel wasn't on, make sure there's not a currently
	 * active PP sequence before enabling AUX VDD.
	 */
	if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
		msleep(dev_priv->panel_t3);	/* VBT-derived T3 power-up delay */

	pp = I915_READ(PCH_PP_CONTROL);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}
 825
/* Release the forced eDP AUX VDD rail and wait out the power-down delay. */
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	pp = I915_READ(PCH_PP_CONTROL);
	pp &= ~EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* Make sure sequencer is idle before allowing subsequent activity */
	msleep(dev_priv->panel_t12);	/* VBT-derived T12 power-cycle delay */
}
 840
/* Returns true if the panel was already on when called */
static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;

	if (I915_READ(PCH_PP_STATUS) & PP_ON)
		return true;

	pp = I915_READ(PCH_PP_CONTROL);

	/* ILK workaround: disable reset around power sequence */
	pp &= ~PANEL_POWER_RESET;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* kick the power sequencer: unlock registers and request panel on */
	pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* wait (up to 5s) for the sequencer to report panel-on and idle */
	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
		     5000))
		DRM_ERROR("panel on wait timed out: 0x%08x\n",
			  I915_READ(PCH_PP_STATUS));

	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	return false;
}
 873
/* Run the PCH power sequencer to turn the eDP panel off. */
static void ironlake_edp_panel_off (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
		PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;

	pp = I915_READ(PCH_PP_CONTROL);

	/* ILK workaround: disable reset around power sequence */
	pp &= ~PANEL_POWER_RESET;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	pp &= ~POWER_TARGET_ON;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	/* wait (up to 5s) for the sequencer to go fully idle/off */
	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
		DRM_ERROR("panel off wait timed out: 0x%08x\n",
			  I915_READ(PCH_PP_STATUS));

	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}
 899
/* Enable the eDP backlight after a settling delay. */
static void ironlake_edp_backlight_on (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(300);
	pp = I915_READ(PCH_PP_CONTROL);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
}
 917
/* Disable the eDP backlight. */
static void ironlake_edp_backlight_off (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	DRM_DEBUG_KMS("\n");
	pp = I915_READ(PCH_PP_CONTROL);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
}
 928
/* Enable the CPU eDP PLL via the DP_A register and let it settle. */
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);	/* PLL settling time */
}
 942
 943static void ironlake_edp_pll_off(struct drm_encoder *encoder)
 
 944{
 945	struct drm_device *dev = encoder->dev;
 946	struct drm_i915_private *dev_priv = dev->dev_private;
 947	u32 dpa_ctl;
 948
 949	dpa_ctl = I915_READ(DP_A);
 950	dpa_ctl &= ~DP_PLL_ENABLE;
 951	I915_WRITE(DP_A, dpa_ctl);
 
 
 
 
 
 952	POSTING_READ(DP_A);
 953	udelay(200);
 954}
 955
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 956/* If the sink supports it, try to set the power state appropriately */
 957static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 958{
 959	int ret, i;
 960
 961	/* Should have a valid DPCD by this point */
 962	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
 963		return;
 964
 965	if (mode != DRM_MODE_DPMS_ON) {
 966		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
 967						  DP_SET_POWER_D3);
 968		if (ret != 1)
 969			DRM_DEBUG_DRIVER("failed to write sink power state\n");
 
 970	} else {
 
 
 971		/*
 972		 * When turning on, we need to retry for 1ms to give the sink
 973		 * time to wake up.
 974		 */
 975		for (i = 0; i < 3; i++) {
 976			ret = intel_dp_aux_native_write_1(intel_dp,
 977							  DP_SET_POWER,
 978							  DP_SET_POWER_D0);
 979			if (ret == 1)
 980				break;
 981			msleep(1);
 982		}
 
 
 
 983	}
 
 
 
 
 984}
 985
/*
 * Pre-modeset hook: wake the sink, quiesce any eDP panel output and take
 * the link down so the following mode set starts from a clean state.
 * Ordering here matters (backlight before panel power, link down last).
 */
static void intel_dp_prepare(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_device *dev = encoder->dev;

	/* Wake up the sink first so subsequent AUX traffic is reliable */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);

	if (is_edp(intel_dp)) {
		ironlake_edp_backlight_off(dev);
		ironlake_edp_panel_off(dev);
		/* eDP not routed through the PCH uses the CPU-side PLL */
		if (!is_pch_edp(intel_dp))
			ironlake_edp_pll_on(encoder);
		else
			ironlake_edp_pll_off(encoder);
	}
	intel_dp_link_down(intel_dp);
}
1004
/*
 * Post-modeset hook: train the link and bring the output fully up.
 * For eDP, VDD is forced on before training and the backlight is only
 * enabled at the very end, after training has completed.
 */
static void intel_dp_commit(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_device *dev = encoder->dev;

	/* eDP: force panel VDD on before starting link training */
	if (is_edp(intel_dp))
		ironlake_edp_panel_on(intel_dp);

	intel_dp_start_link_train(intel_dp);

	/* eDP: full panel power on, then drop the VDD force */
	if (is_edp(intel_dp)) {
		ironlake_edp_panel_on(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp);
	}

	intel_dp_complete_link_train(intel_dp);

	if (is_edp(intel_dp))
		ironlake_edp_backlight_on(dev);

	intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
}
1027
/*
 * DPMS entry point.  On the way down: backlight first, then link, then
 * panel power and finally the CPU eDP PLL.  On the way up: retrain the
 * link only if the port register shows it disabled, then restore the
 * backlight last.
 */
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (mode != DRM_MODE_DPMS_ON) {
		if (is_edp(intel_dp))
			ironlake_edp_backlight_off(dev);
		intel_dp_sink_dpms(intel_dp, mode);
		intel_dp_link_down(intel_dp);
		if (is_edp(intel_dp))
			ironlake_edp_panel_off(dev);
		/* Non-PCH eDP also owns the CPU-side PLL; shut it down last */
		if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
			ironlake_edp_pll_off(encoder);
	} else {
		if (is_edp(intel_dp))
			ironlake_edp_panel_vdd_on(intel_dp);
		intel_dp_sink_dpms(intel_dp, mode);
		if (!(dp_reg & DP_PORT_EN)) {
			/* Port was fully off: needs a complete retrain */
			intel_dp_start_link_train(intel_dp);
			if (is_edp(intel_dp)) {
				ironlake_edp_panel_on(intel_dp);
				ironlake_edp_panel_vdd_off(intel_dp);
			}
			intel_dp_complete_link_train(intel_dp);
		}
		if (is_edp(intel_dp))
			ironlake_edp_backlight_on(dev);
	}
	intel_dp->dpms_mode = mode;
}
1062
1063/*
1064 * Native read with retry for link status and receiver capability reads for
1065 * cases where the sink may still be asleep.
1066 */
1067static bool
1068intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1069			       uint8_t *recv, int recv_bytes)
1070{
1071	int ret, i;
 
 
 
 
1072
1073	/*
1074	 * Sinks are *supposed* to come up within 1ms from an off state,
1075	 * but we're also supposed to retry 3 times per the spec.
 
 
1076	 */
1077	for (i = 0; i < 3; i++) {
1078		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1079					       recv_bytes);
1080		if (ret == recv_bytes)
1081			return true;
1082		msleep(1);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1083	}
1084
1085	return false;
 
 
 
 
 
 
 
 
1086}
1087
1088/*
1089 * Fetch AUX CH registers 0x202 - 0x207 which contain
1090 * link status information
1091 */
1092static bool
1093intel_dp_get_link_status(struct intel_dp *intel_dp)
1094{
1095	return intel_dp_aux_native_read_retry(intel_dp,
1096					      DP_LANE0_1_STATUS,
1097					      intel_dp->link_status,
1098					      DP_LINK_STATUS_SIZE);
1099}
1100
/*
 * Index the cached link status block by DPCD register address 'r'
 * (the cache starts at DP_LANE0_1_STATUS, i.e. 0x202).
 */
static uint8_t
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
		     int r)
{
	return link_status[r - DP_LANE0_1_STATUS];
}
1107
1108static uint8_t
1109intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
1110				 int lane)
1111{
1112	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1113	int	    s = ((lane & 1) ?
1114			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1115			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1116	uint8_t l = intel_dp_link_status(link_status, i);
1117
1118	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
 
 
1119}
1120
1121static uint8_t
1122intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
1123				      int lane)
1124{
1125	int	    i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1126	int	    s = ((lane & 1) ?
1127			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1128			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1129	uint8_t l = intel_dp_link_status(link_status, i);
1130
1131	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1132}
1133
 
 
 
 
1134
1135#if 0
1136static char	*voltage_names[] = {
1137	"0.4V", "0.6V", "0.8V", "1.2V"
1138};
1139static char	*pre_emph_names[] = {
1140	"0dB", "3.5dB", "6dB", "9.5dB"
1141};
1142static char	*link_train_names[] = {
1143	"pattern 1", "pattern 2", "idle", "off"
1144};
1145#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1146
1147/*
1148 * These are source-specific values; current Intel hardware supports
1149 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1150 */
1151#define I830_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_800
 
 
 
 
 
1152
1153static uint8_t
1154intel_dp_pre_emphasis_max(uint8_t voltage_swing)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1155{
1156	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1157	case DP_TRAIN_VOLTAGE_SWING_400:
1158		return DP_TRAIN_PRE_EMPHASIS_6;
1159	case DP_TRAIN_VOLTAGE_SWING_600:
1160		return DP_TRAIN_PRE_EMPHASIS_6;
1161	case DP_TRAIN_VOLTAGE_SWING_800:
1162		return DP_TRAIN_PRE_EMPHASIS_3_5;
1163	case DP_TRAIN_VOLTAGE_SWING_1200:
1164	default:
1165		return DP_TRAIN_PRE_EMPHASIS_0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1166	}
1167}
1168
/*
 * Compute the next train_set values from the sink's adjust requests:
 * take the strongest request across all active lanes, clamp to source
 * limits (flagging MAX_*_REACHED so the training loop can stop), and
 * apply the same setting to all four lanes.
 */
static void
intel_get_adjust_train(struct intel_dp *intel_dp)
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
		uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	if (v >= I830_DP_VOLTAGE_MAX)
		v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;

	if (p >= intel_dp_pre_emphasis_max(v))
		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* All four lanes get the same drive settings on this hardware */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}
1195
/*
 * Translate a DPCD TRAINING_LANEx_SET value into the DP port register's
 * voltage swing / pre-emphasis field encoding (non-SNB-eDP path).
 * Unrecognised values fall back to the lowest setting.
 */
static uint32_t
intel_dp_signal_levels(uint8_t train_set, int lane_count)
{
	uint32_t	signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}
1233
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	/*
	 * SNB supports a limited set of swing/pre-emphasis combinations;
	 * several DPCD requests map onto the same hardware setting, and
	 * unsupported combinations fall back to the lowest level.
	 */
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
1261
1262static uint8_t
1263intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1264		      int lane)
1265{
1266	int i = DP_LANE0_1_STATUS + (lane >> 1);
1267	int s = (lane & 1) * 4;
1268	uint8_t l = intel_dp_link_status(link_status, i);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1269
1270	return (l >> s) & 0xf;
 
 
 
 
1271}
1272
1273/* Check for clock recovery is done on all channels */
1274static bool
1275intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1276{
1277	int lane;
1278	uint8_t lane_status;
1279
1280	for (lane = 0; lane < lane_count; lane++) {
1281		lane_status = intel_get_lane_status(link_status, lane);
1282		if ((lane_status & DP_LANE_CR_DONE) == 0)
1283			return false;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1284	}
1285	return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
1286}
1287
1288/* Check to see if channel eq is done on all channels */
1289#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
1290			 DP_LANE_CHANNEL_EQ_DONE|\
1291			 DP_LANE_SYMBOL_LOCKED)
1292static bool
1293intel_channel_eq_ok(struct intel_dp *intel_dp)
1294{
1295	uint8_t lane_align;
1296	uint8_t lane_status;
1297	int lane;
1298
1299	lane_align = intel_dp_link_status(intel_dp->link_status,
1300					  DP_LANE_ALIGN_STATUS_UPDATED);
1301	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1302		return false;
1303	for (lane = 0; lane < intel_dp->lane_count; lane++) {
1304		lane_status = intel_get_lane_status(intel_dp->link_status, lane);
1305		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1306			return false;
1307	}
1308	return true;
1309}
1310
1311static bool
1312intel_dp_set_link_train(struct intel_dp *intel_dp,
1313			uint32_t dp_reg_value,
1314			uint8_t dp_train_pat)
1315{
1316	struct drm_device *dev = intel_dp->base.base.dev;
1317	struct drm_i915_private *dev_priv = dev->dev_private;
1318	int ret;
 
1319
1320	I915_WRITE(intel_dp->output_reg, dp_reg_value);
1321	POSTING_READ(intel_dp->output_reg);
1322
1323	intel_dp_aux_native_write_1(intel_dp,
1324				    DP_TRAINING_PATTERN_SET,
1325				    dp_train_pat);
1326
1327	ret = intel_dp_aux_native_write(intel_dp,
1328					DP_TRAINING_LANE0_SET,
1329					intel_dp->train_set, 4);
1330	if (ret != 4)
1331		return false;
1332
1333	return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1334}
1335
/*
 * Clock-recovery phase of link training (DP spec 3.5.1.3): enable the
 * port, drive training pattern 1 and iterate the voltage/pre-emphasis
 * settings as requested by the sink until all lanes report CR_DONE, the
 * maximum swing is reached, or the same voltage has been tried 5 times.
 * The final port register value is saved back into intel_dp->DP.
 */
static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int tries;
	u32 reg;
	uint32_t DP = intel_dp->DP;

	/*
	 * On CPT we have to enable the port in training pattern 1, which
	 * will happen below in intel_dp_set_link_train.  Otherwise, enable
	 * the port and wait for it to become active.
	 */
	if (!HAS_PCH_CPT(dev)) {
		I915_WRITE(intel_dp->output_reg, intel_dp->DP);
		POSTING_READ(intel_dp->output_reg);
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;
	/* CPT non-eDP uses a different training-pattern field in the reg */
	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
	else
		DP &= ~DP_LINK_TRAIN_MASK;
	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;		/* sentinel: no voltage tried yet */
	tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t    signal_levels;
		if (IS_GEN6(dev) && is_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
			reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
		else
			reg = DP | DP_LINK_TRAIN_PAT_1;

		/* Set training pattern 1 on both source and sink */
		if (!intel_dp_set_link_train(intel_dp, reg,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		udelay(100);
		if (!intel_dp_get_link_status(intel_dp))
			break;

		if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count)
			break;

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++tries;
			if (tries == 5)
				break;
		} else
			tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp);
	}

	intel_dp->DP = DP;
}
1428
/*
 * Channel-equalization phase of link training: drive training pattern 2
 * and adjust drive settings until all lanes report EQ done.  If clock
 * recovery is lost, restart pattern-1 training (bounded by cr_tries);
 * give up entirely after repeated failures.  Finishes by switching both
 * source and sink out of training mode.
 */
static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool channel_eq = false;
	int tries, cr_tries;
	u32 reg;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t    signal_levels;

		/* Too many full restarts: abort and drop the link */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		if (IS_GEN6(dev) && is_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
			reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
		else
			reg = DP | DP_LINK_TRAIN_PAT_2;

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, reg,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		udelay(400);
		if (!intel_dp_get_link_status(intel_dp))
			break;

		/* Make sure clock is still ok */
		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (intel_channel_eq_ok(intel_dp)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp);
		++tries;
	}

	/* Training done: take both ends out of training mode */
	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
		reg = DP | DP_LINK_TRAIN_OFF_CPT;
	else
		reg = DP | DP_LINK_TRAIN_OFF;

	I915_WRITE(intel_dp->output_reg, reg);
	POSTING_READ(intel_dp->output_reg);
	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
}
1512
/*
 * Take the DP link down: idle the link, apply the transcoder-B select
 * workaround where needed, and finally disable the port.  Does nothing
 * if the port is already disabled.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
		return;

	DRM_DEBUG_KMS("\n");

	/* eDP: drop the PLL enable in the port register first */
	if (is_edp(intel_dp)) {
		DP &= ~DP_PLL_ENABLE;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
		udelay(100);
	}

	/* Put the link into the idle pattern before disabling it */
	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	msleep(17);

	if (is_edp(intel_dp))
		DP |= DP_LINK_TRAIN_OFF;

	if (!HAS_PCH_CPT(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
}
1582
1583static bool
1584intel_dp_get_dpcd(struct intel_dp *intel_dp)
1585{
1586	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
1587					   sizeof (intel_dp->dpcd)) &&
1588	    (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
1589		return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1590	}
1591
1592	return false;
 
 
 
 
1593}
1594
1595/*
1596 * According to DP spec
1597 * 5.1.2:
1598 *  1. Read DPCD
1599 *  2. Configure link according to Receiver Capabilities
1600 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
1601 *  4. Check link status on receipt of hot-plug interrupt
 
 
 
 
 
1602 */
1603
/*
 * Hotplug-time link validation: if the output is active, confirm the
 * link status and DPCD are still readable (tearing the link down if
 * not) and retrain when channel equalization has been lost.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	/* Nothing to validate if the output isn't on */
	if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
		return;

	if (!intel_dp->base.base.crtc)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	if (!intel_channel_eq_ok(intel_dp)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}
1632
 
1633static enum drm_connector_status
1634intel_dp_detect_dpcd(struct intel_dp *intel_dp)
1635{
1636	if (intel_dp_get_dpcd(intel_dp))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1637		return connector_status_connected;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1638	return connector_status_disconnected;
1639}
1640
1641static enum drm_connector_status
1642ironlake_dp_detect(struct intel_dp *intel_dp)
1643{
 
1644	enum drm_connector_status status;
1645
1646	/* Can't disconnect eDP, but you can close the lid... */
1647	if (is_edp(intel_dp)) {
1648		status = intel_panel_detect(intel_dp->base.base.dev);
1649		if (status == connector_status_unknown)
1650			status = connector_status_connected;
1651		return status;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1652	}
1653
1654	return intel_dp_detect_dpcd(intel_dp);
1655}
1656
1657static enum drm_connector_status
1658g4x_dp_detect(struct intel_dp *intel_dp)
1659{
1660	struct drm_device *dev = intel_dp->base.base.dev;
1661	struct drm_i915_private *dev_priv = dev->dev_private;
1662	uint32_t temp, bit;
1663
1664	switch (intel_dp->output_reg) {
1665	case DP_B:
1666		bit = DPB_HOTPLUG_INT_STATUS;
1667		break;
1668	case DP_C:
1669		bit = DPC_HOTPLUG_INT_STATUS;
1670		break;
1671	case DP_D:
1672		bit = DPD_HOTPLUG_INT_STATUS;
1673		break;
1674	default:
1675		return connector_status_unknown;
 
1676	}
1677
1678	temp = I915_READ(PORT_HOTPLUG_STAT);
 
1679
1680	if ((temp & bit) == 0)
1681		return connector_status_disconnected;
 
 
1682
1683	return intel_dp_detect_dpcd(intel_dp);
 
 
 
 
 
 
 
 
 
 
 
1684}
1685
1686/**
1687 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1688 *
1689 * \return true if DP port is connected.
1690 * \return false if DP port is disconnected.
1691 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	/* Platform-specific presence check (also refreshes the DPCD cache) */
	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);

	if (status != connector_status_connected)
		return status;

	/* Audio: honour the user's force_audio override, else ask the EDID */
	if (intel_dp->force_audio) {
		intel_dp->has_audio = intel_dp->force_audio > 0;
	} else {
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			connector->display_info.raw_edid = NULL;
			kfree(edid);
		}
	}

	return connector_status_connected;
}
1728
/*
 * Probe modes over DDC.  For eDP, cache the panel's preferred mode as
 * the fixed panel mode on first success; if the panel has no EDID, fall
 * back to the fixed mode from the VBT.  Returns the number of modes.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
	if (ret) {
		/* Remember the eDP panel's preferred mode for fixed-mode use */
		if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
			struct drm_display_mode *newmode;
			list_for_each_entry(newmode, &connector->probed_modes,
					    head) {
				if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
					dev_priv->panel_fixed_mode =
						drm_mode_duplicate(dev, newmode);
					break;
				}
			}
		}

		return ret;
	}

	/* if eDP has no EDID, try to use fixed panel mode from VBT */
	if (is_edp(intel_dp)) {
		if (dev_priv->panel_fixed_mode != NULL) {
			struct drm_display_mode *mode;
			mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}
1767
1768static bool
1769intel_dp_detect_audio(struct drm_connector *connector)
1770{
1771	struct intel_dp *intel_dp = intel_attached_dp(connector);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1772	struct edid *edid;
1773	bool has_audio = false;
1774
1775	edid = drm_get_edid(connector, &intel_dp->adapter);
1776	if (edid) {
1777		has_audio = drm_detect_monitor_audio(edid);
 
 
 
1778
1779		connector->display_info.raw_edid = NULL;
1780		kfree(edid);
 
 
 
 
 
 
 
 
 
1781	}
1782
1783	return has_audio;
1784}
1785
/*
 * Connector property handler for force_audio and broadcast_rgb.  When a
 * property change actually alters the output configuration, the current
 * mode is re-set on the attached CRTC to apply it.  Returns 0 on
 * success, -EINVAL for unknown properties.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* 0 means "auto": ask the EDID; otherwise force on/off */
		if (i == 0)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = i > 0;

		/* No effective change: no modeset needed */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_dp->color_range)
			return 0;

		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	return -EINVAL;

done:
	/* Re-apply the current mode so the new setting takes effect */
	if (intel_dp->base.base.crtc) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
		drm_crtc_helper_set_mode(crtc, &crtc->mode,
					 crtc->x, crtc->y,
					 crtc->fb);
	}

	return 0;
}
1840
/*
 * Connector teardown: release the eDP backlight (if this device routes
 * eDP through DP-D), then unregister and free the connector.
 */
static void
intel_dp_destroy (struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	if (intel_dpd_is_edp(dev))
		intel_panel_destroy_backlight(dev);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
1853
/*
 * Encoder teardown: remove the DP AUX i2c adapter registered for this
 * port, then clean up and free the encoder-embedding intel_dp.
 */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	kfree(intel_dp);
}
1862
/* Encoder helper vtable: modeset hooks for the DP/eDP encoder. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.dpms = intel_dp_dpms,
	.mode_fixup = intel_dp_mode_fixup,
	.prepare = intel_dp_prepare,
	.mode_set = intel_dp_mode_set,
	.commit = intel_dp_commit,
};
1870
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Connector vtable: userspace-visible detect/property/destroy entry points. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};
1878
/* Connector helper vtable: probing and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
1884
/* Encoder vtable: only destruction is non-default for DP. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
1888
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Hot-plug interrupt handler: re-check the link state whenever the port
 * reports a plug/unplug event.
 */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	intel_dp_check_link_status(intel_dp);
}
1896
/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel (struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_encoder *encoder;

	/* Walk every encoder and find the DP output driven by this CRTC. */
	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
		struct intel_dp *intel_dp;

		if (encoder->crtc != crtc)
			continue;

		intel_dp = enc_to_intel_dp(encoder);
		/* Only regular DP (not eDP) outputs select the transcoder. */
		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
			return intel_dp->output_reg;
	}

	/* No DisplayPort encoder is attached to this CRTC. */
	return -1;
}
1918
/* check the VBT to see whether the eDP is on DP-D port */
bool intel_dpd_is_edp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct child_device_config *p_child;
	int i;

	/* No VBT child device table parsed -> nothing to look up. */
	if (!dev_priv->child_dev_num)
		return false;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		p_child = dev_priv->child_dev + i;

		/* eDP panel declared on the DP-D port in the VBT. */
		if (p_child->dvo_port == PORT_IDPD &&
		    p_child->device_type == DEVICE_TYPE_eDP)
			return true;
	}
	return false;
}
1938
/* Attach the connector properties common to all DP outputs
 * (force-audio and broadcast-RGB range selection). */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
}
1945
/*
 * Create and register a DP (or eDP) encoder + connector pair for the port
 * controlled by @output_reg, set up its DDC/AUX channel and, for eDP,
 * cache panel power-sequencing timings and DPCD data.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_dp *intel_dp;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	const char *name = NULL;
	int type;

	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
	if (!intel_dp)
		return;

	intel_dp->output_reg = output_reg;
	intel_dp->dpms_mode = -1;	/* unknown until the first dpms call */

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dp);
		return;
	}
	intel_encoder = &intel_dp->base;

	/* On PCH-split parts the eDP panel may hang off port D (per VBT). */
	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/* DP_A (CPU eDP) and PCH eDP register as eDP connectors. */
	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	}

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	/* Detection is interrupt driven; no periodic polling needed. */
	connector->polled = DRM_CONNECTOR_POLL_HPD;

	if (output_reg == DP_B || output_reg == PCH_DP_B)
		intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
	else if (output_reg == DP_C || output_reg == PCH_DP_C)
		intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
	else if (output_reg == DP_D || output_reg == PCH_DP_D)
		intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);

	/* eDP gets its own clone bit, overriding the per-port one above. */
	if (is_edp(intel_dp))
		intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);

	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);	/* pipes A and B */
	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	/* Set up the DDC bus. */
	switch (output_reg) {
		case DP_A:
			name = "DPDDC-A";
			break;
		case DP_B:
		case PCH_DP_B:
			dev_priv->hotplug_supported_mask |=
				HDMIB_HOTPLUG_INT_STATUS;
			name = "DPDDC-B";
			break;
		case DP_C:
		case PCH_DP_C:
			dev_priv->hotplug_supported_mask |=
				HDMIC_HOTPLUG_INT_STATUS;
			name = "DPDDC-C";
			break;
		case DP_D:
		case PCH_DP_D:
			dev_priv->hotplug_supported_mask |=
				HDMID_HOTPLUG_INT_STATUS;
			name = "DPDDC-D";
			break;
	}

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache some DPCD data in the eDP case */
	if (is_edp(intel_dp)) {
		bool ret;
		u32 pp_on, pp_div;

		pp_on = I915_READ(PCH_PP_ON_DELAYS);
		pp_div = I915_READ(PCH_PP_DIVISOR);

		/* Get T3 & T12 values (note: VESA not bspec terminology) */
		dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
		dev_priv->panel_t3 /= 10; /* t3 in 100us units */
		dev_priv->panel_t12 = pp_div & 0xf;
		dev_priv->panel_t12 *= 100; /* t12 in 100ms units */

		/* Force panel VDD on around the DPCD read over AUX. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp);
		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_dp->base.base);
			intel_dp_destroy(&intel_connector->base);
			return;
		}
	}

	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (is_edp(intel_dp)) {
		/* initialize panel mode from VBT if available for eDP */
		if (dev_priv->lfp_lvds_vbt_mode) {
			dev_priv->panel_fixed_mode =
				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (dev_priv->panel_fixed_mode) {
				dev_priv->panel_fixed_mode->type |=
					DRM_MODE_TYPE_PREFERRED;
			}
		}
		dev_priv->int_edp_connector = connector;
		intel_panel_setup_backlight(dev);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
v4.17
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/i2c.h>
  29#include <linux/slab.h>
  30#include <linux/export.h>
  31#include <linux/types.h>
  32#include <linux/notifier.h>
  33#include <linux/reboot.h>
  34#include <asm/byteorder.h>
  35#include <drm/drmP.h>
  36#include <drm/drm_atomic_helper.h>
  37#include <drm/drm_crtc.h>
  38#include <drm/drm_crtc_helper.h>
  39#include <drm/drm_dp_helper.h>
  40#include <drm/drm_edid.h>
  41#include <drm/drm_hdcp.h>
  42#include "intel_drv.h"
  43#include <drm/i915_drm.h>
  44#include "i915_drv.h"
 
  45
 
 
  46#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
  47#define DP_DPRX_ESI_LEN 14
  48
  49/* Compliance test status bits  */
  50#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
  51#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  52#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  53#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  54
/* Fixed DPLL divider settings keyed by DP link clock (in kHz). */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

/* Gen4 (G4x-era) DPLL settings for the two base DP link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH-split platforms use different divider values. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL settings. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
  98
/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	/* The encoder type distinguishes eDP panels from regular DP sinks. */
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
 112
/* Return the drm_device owning this DP port, via its digital port. */
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}
 119
/* Get the intel_dp behind the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
 124
 125static void intel_dp_link_down(struct intel_encoder *encoder,
 126			       const struct intel_crtc_state *old_crtc_state);
 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 129static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
 130					   const struct intel_crtc_state *crtc_state);
 131static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 132				      enum pipe pipe);
 133static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 134
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	/* Standard DP link rates in kHz, ascending. */
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	/* Keep every standard rate up to the sink's advertised maximum. */
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}
 153
 154/* Get length of rates array potentially limited by max_rate. */
 155static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
 156{
 157	int i;
 158
 159	/* Limit results by potentially reduced max rate */
 160	for (i = 0; i < len; i++) {
 161		if (rates[len - i - 1] <= max_rate)
 162			return len - i;
 163	}
 164
 165	return 0;
 166}
 167
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	/* Thin wrapper over the generic length limiter for common_rates. */
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
 175
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates is sorted ascending, so the last entry is the max. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
 181
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;	/* lanes wired on the port */
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);	/* sink DPCD limit */

	return min(source_max, sink_max);
}
 191
/* Current lane-count cap (may have been lowered by link-training fallback). */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
 196
 197int
 198intel_dp_link_required(int pixel_clock, int bpp)
 199{
 200	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
 201	return DIV_ROUND_UP(pixel_clock * bpp, 8);
 202}
 203
 204int
 205intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 
 206{
 207	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
 208	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
 209	 * is transmitted every LS_Clk per lane, there is no need to account for
 210	 * the channel encoding that is done in the PHY layer here.
 211	 */
 212
 213	return max_link_clock * max_lanes;
 
 
 
 
 214}
 215
/*
 * Max dotclock for this output, possibly lowered by a DP-to-VGA
 * downstream port's advertised pixel clock limit.
 */
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	/* Only VGA downstream ports are consulted for a clock limit here. */
	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	/* 0 means the branch device reported no limit. */
	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
 238
/* Max source link rate on CNL, limited by voltage SKU and port. */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
 261
/*
 * Select the platform's source link-rate table and clamp it by the
 * SKU limit (CNL) and/or the VBT-provided maximum.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (IS_CANNONLAKE(dev_priv)) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		max_rate = cnl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Use the stricter of the SKU and VBT limits, when both exist. */
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
 321
/*
 * Merge-intersect two ascending rate arrays into @common_rates.
 * Returns the number of entries written (capped at DP_MAX_SUPPORTED_RATES).
 */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			/* Out of output space; should not happen in practice. */
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
 344
 345/* return index of rate in rates array, or -1 if not found */
 346static int intel_dp_rate_index(const int *rates, int len, int rate)
 347{
 348	int i;
 349
 350	for (i = 0; i < len; i++)
 351		if (rate == rates[i])
 352			return i;
 353
 354	return -1;
 
 
 
 
 355}
 356
/* Compute the intersection of source and sink rates into common_rates. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
 373
/* Check that a cached (rate, lane count) pair is within the current caps. */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       uint8_t lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}
 392
/*
 * After a link-training failure, lower the link parameters for the next
 * attempt: first step down to the next lower common link rate; once at
 * the lowest rate, halve the lane count instead.  Returns -1 when no
 * lower configuration remains.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, uint8_t lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Next lower common rate, same lane count. */
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Lowest rate reached: retry at max rate with half the lanes. */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
 414
/* Validate @mode against panel limits, link bandwidth and dotclock caps. */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP modes may not exceed the fixed panel mode's dimensions. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Bandwidth check at 18 bpp, the lowest depth the link may use. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	/* Reject dotclocks below 10 MHz. */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
 455
 456uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
 
 457{
 458	int	i;
 459	uint32_t v = 0;
 460
 461	if (src_bytes > 4)
 462		src_bytes = 4;
 463	for (i = 0; i < src_bytes; i++)
 464		v |= ((uint32_t) src[i]) << ((3-i) * 8);
 465	return v;
 466}
 467
 468static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
 
 469{
 470	int i;
 471	if (dst_bytes > 4)
 472		dst_bytes = 4;
 473	for (i = 0; i < dst_bytes; i++)
 474		dst[i] = src >> ((3-i) * 8);
 475}
 476
 477static void
 478intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
 479static void
 480intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 481					      bool force_disable_vdd);
 482static void
 483intel_dp_pps_init(struct intel_dp *intel_dp);
 484
/* Take the PPS mutex while holding a reference on the AUX power domain. */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
 497
/* Release the PPS mutex and drop the AUX power domain reference. */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	mutex_unlock(&dev_priv->pps_mutex);

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
 506
/*
 * Make the VLV/CHV panel power sequencer latch onto this port by briefly
 * enabling and disabling the DP port (with the pipe's DPLL forced on if
 * necessary).  Required before even VDD force will work.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	/* Cannot kick while the port is actively driving a display. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power seqeuencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL enable and powergate override. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
 579
/* Find a pipe whose power sequencer is not claimed by any DP/eDP port. */
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			/* eDP pins the PPS to its pipe. */
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			/* Regular DP never owns a PPS pipe. */
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
 617
/*
 * Return the pipe whose power sequencer drives this eDP port, claiming
 * and initializing a free one if none is assigned yet.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
 664
/*
 * Return the PPS instance index (the VBT backlight controller),
 * reprogramming the PPS registers first if they were reset.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}
 689
/* Predicate used to qualify a pipe when scanning for the initial PPS. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Panel power is currently on for this pipe's power sequencer. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

/* VDD force is currently asserted for this pipe's power sequencer. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe (fallback predicate). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
 710
 711static enum pipe
 712vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 713		     enum port port,
 714		     vlv_pipe_check pipe_check)
 715{
 716	enum pipe pipe;
 717
 718	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 719		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
 720			PANEL_PORT_SELECT_MASK;
 721
 722		if (port_sel != PANEL_PORT_SELECT_VLV(port))
 723			continue;
 724
 725		if (!pipe_check(dev_priv, pipe))
 726			continue;
 727
 728		return pipe;
 729	}
 730
 731	return INVALID_PIPE;
 732}
 733
 734static void
 735vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 736{
 737	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 738	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 739	enum port port = intel_dig_port->base.port;
 740
 741	lockdep_assert_held(&dev_priv->pps_mutex);
 742
 743	/* try to find a pipe with this port selected */
 744	/* first pick one where the panel is on */
 745	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 746						  vlv_pipe_has_pp_on);
 747	/* didn't find one? pick one where vdd is on */
 748	if (intel_dp->pps_pipe == INVALID_PIPE)
 749		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 750							  vlv_pipe_has_vdd_on);
 751	/* didn't find one? pick one with just the correct port */
 752	if (intel_dp->pps_pipe == INVALID_PIPE)
 753		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
 754							  vlv_pipe_any);
 755
 756	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
 757	if (intel_dp->pps_pipe == INVALID_PIPE) {
 758		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
 759			      port_name(port));
 760		return;
 761	}
 762
 763	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
 764		      port_name(port), pipe_name(intel_dp->pps_pipe));
 765
 766	intel_dp_init_panel_power_sequencer(intel_dp);
 767	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 768}
 769
/*
 * Invalidate the cached panel power sequencer state of every eDP port.
 * Only meaningful on VLV/CHV/GEN9_LP (enforced by the WARN below): on
 * GEN9_LP the sequencer HW is flagged for reprogramming, on VLV/CHV the
 * pipe assignment is forgotten entirely.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		/* Only DP-capable encoder types are of interest. */
		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP &&
		    encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		/* Skip pure DVI/HDMI DDI encoders */
		if (!i915_mmio_reg_valid(intel_dp->output_reg))
			continue;

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		/* Only eDP ports have power sequencer state to reset. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
 813
/*
 * Panel power sequencer register set for one sequencer instance,
 * resolved by intel_pps_get_registers().
 */
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;	/* left zeroed on GEN9_LP/CNP/ICP */
};
 821
 822static void intel_pps_get_registers(struct intel_dp *intel_dp,
 823				    struct pps_registers *regs)
 824{
 825	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 826	int pps_idx = 0;
 827
 828	memset(regs, 0, sizeof(*regs));
 829
 830	if (IS_GEN9_LP(dev_priv))
 831		pps_idx = bxt_power_sequencer_idx(intel_dp);
 832	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 833		pps_idx = vlv_power_sequencer_pipe(intel_dp);
 834
 835	regs->pp_ctrl = PP_CONTROL(pps_idx);
 836	regs->pp_stat = PP_STATUS(pps_idx);
 837	regs->pp_on = PP_ON_DELAYS(pps_idx);
 838	regs->pp_off = PP_OFF_DELAYS(pps_idx);
 839	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
 840	    !HAS_PCH_ICP(dev_priv))
 841		regs->pp_div = PP_DIVISOR(pps_idx);
 842}
 843
 844static i915_reg_t
 845_pp_ctrl_reg(struct intel_dp *intel_dp)
 846{
 847	struct pps_registers regs;
 848
 849	intel_pps_get_registers(intel_dp, &regs);
 850
 851	return regs.pp_ctrl;
 852}
 853
 854static i915_reg_t
 855_pp_stat_reg(struct intel_dp *intel_dp)
 856{
 857	struct pps_registers regs;
 858
 859	intel_pps_get_registers(intel_dp, &regs);
 860
 861	return regs.pp_stat;
 862}
 863
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	/* Only act for eDP panels and only on restart. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* block the reboot until the panel power cycle has elapsed */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
 898
 899static bool edp_have_panel_power(struct intel_dp *intel_dp)
 900{
 901	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 902
 903	lockdep_assert_held(&dev_priv->pps_mutex);
 904
 905	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 906	    intel_dp->pps_pipe == INVALID_PIPE)
 907		return false;
 908
 909	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 910}
 911
 912static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 913{
 914	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 915
 916	lockdep_assert_held(&dev_priv->pps_mutex);
 917
 918	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 919	    intel_dp->pps_pipe == INVALID_PIPE)
 920		return false;
 921
 922	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
 923}
 924
 925static void
 926intel_dp_check_edp(struct intel_dp *intel_dp)
 927{
 928	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 929
 930	if (!intel_dp_is_edp(intel_dp))
 931		return;
 932
 933	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
 934		WARN(1, "eDP powered off while attempting aux channel communication.\n");
 935		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
 936			      I915_READ(_pp_stat_reg(intel_dp)),
 937			      I915_READ(_pp_ctrl_reg(intel_dp)));
 938	}
 939}
 940
/*
 * Wait (up to 10 ms) for the AUX channel to go idle, i.e. for SEND_BUSY to
 * clear in the channel control register, either via the AUX-done interrupt
 * or by polling. Returns the last value read from the control register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	uint32_t status;
	bool done;

/* C re-reads the control register and tests that SEND_BUSY has dropped;
 * as a side effect it leaves the latest value in 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
 962
 963static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 964{
 965	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 966
 967	if (index)
 968		return 0;
 969
 970	/*
 971	 * The clock divider is based off the hrawclk, and would like to run at
 972	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
 973	 */
 974	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 975}
 976
 977static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 978{
 979	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 980
 981	if (index)
 982		return 0;
 983
 984	/*
 985	 * The clock divider is based off the cdclk or PCH rawclk, and would
 986	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
 987	 * divide by 2000 and use that
 988	 */
 989	if (intel_dp->aux_ch == AUX_CH_A)
 990		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
 991	else
 992		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
 993}
 994
 995static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 996{
 997	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 998
 999	if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1000		/* Workaround for non-ULT HSW */
1001		switch (index) {
1002		case 0: return 63;
1003		case 1: return 72;
1004		default: return 0;
1005		}
1006	}
1007
1008	return ilk_get_aux_clock_divider(intel_dp, index);
1009}
1010
1011static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1012{
1013	/*
1014	 * SKL doesn't need us to program the AUX clock divider (Hardware will
1015	 * derive the clock from CDCLK automatically). We still implement the
1016	 * get_aux_clock_divider vfunc to plug-in into the existing code.
1017	 */
1018	return index ? 0 : 1;
1019}
1020
1021static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1022				     bool has_aux_irq,
1023				     int send_bytes,
1024				     uint32_t aux_clock_divider)
1025{
1026	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1027	struct drm_i915_private *dev_priv =
1028			to_i915(intel_dig_port->base.base.dev);
1029	uint32_t precharge, timeout;
1030
1031	if (IS_GEN6(dev_priv))
1032		precharge = 3;
1033	else
1034		precharge = 5;
1035
1036	if (IS_BROADWELL(dev_priv))
1037		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1038	else
1039		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1040
1041	return DP_AUX_CH_CTL_SEND_BUSY |
1042	       DP_AUX_CH_CTL_DONE |
1043	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
1044	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
1045	       timeout |
1046	       DP_AUX_CH_CTL_RECEIVE_ERROR |
1047	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1048	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1049	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1050}
1051
1052static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1053				      bool has_aux_irq,
1054				      int send_bytes,
1055				      uint32_t unused)
1056{
1057	return DP_AUX_CH_CTL_SEND_BUSY |
1058	       DP_AUX_CH_CTL_DONE |
1059	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
1060	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
1061	       DP_AUX_CH_CTL_TIME_OUT_MAX |
1062	       DP_AUX_CH_CTL_RECEIVE_ERROR |
1063	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1064	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1065	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1066}
1067
/*
 * Perform one raw AUX channel transfer: write @send_bytes from @send, then
 * read up to @recv_size bytes into @recv.
 *
 * Returns the number of bytes received on success, or a negative error
 * code: -E2BIG for oversized messages, -EBUSY if the channel never starts
 * or never completes, -EIO on a receive error, -ETIMEDOUT when the sink
 * did not reply (typically: nothing connected).
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const uint8_t *send, int send_bytes,
		  uint8_t *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
	bool vdd;

	/* Resolve the control and data registers for this AUX channel. */
	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* WARN only once per distinct stuck status value */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the platform's candidate AUX clock dividers. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Restore the default PM QoS request and drop VDD if we enabled it. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
1238
/* AUX message header: 3 command/address bytes, plus 1 length byte when a
 * payload follows (see intel_dp_aux_header()). */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
1241
/* Fill in the 4-byte AUX message header from @msg. */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	/* byte 0: 4-bit request opcode + address bits 19:16 */
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	/* transfer length is encoded as size - 1 */
	txbuf[3] = msg->size - 1;
}
1251
/*
 * drm_dp_aux .transfer() hook: translate a drm_dp_aux_msg into a raw AUX
 * transfer via intel_dp_aux_xfer(). Returns the payload size on success
 * (for reads: bytes copied to msg->buffer) or a negative error code.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* zero-sized messages are address-only transactions */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			/* reply code sits in the top nibble of byte 0 */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1322
/*
 * Determine which AUX channel this port uses: the default is a 1:1
 * port->AUX mapping, which the VBT may override via alternate_aux_channel.
 */
static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum port port = encoder->port;
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[port];
	enum aux_ch aux_ch;

	if (!info->alternate_aux_channel) {
		/* No VBT override: the AUX channel matches the port. */
		aux_ch = (enum aux_ch) port;

		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
			      aux_ch_name(aux_ch), port_name(port));
		return aux_ch;
	}

	/* Translate the VBT DP_AUX_* encoding into our aux_ch enum. */
	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		aux_ch = AUX_CH_A;
		break;
	case DP_AUX_B:
		aux_ch = AUX_CH_B;
		break;
	case DP_AUX_C:
		aux_ch = AUX_CH_C;
		break;
	case DP_AUX_D:
		aux_ch = AUX_CH_D;
		break;
	case DP_AUX_F:
		aux_ch = AUX_CH_F;
		break;
	default:
		/* Bogus VBT value: complain and fall back to channel A. */
		MISSING_CASE(info->alternate_aux_channel);
		aux_ch = AUX_CH_A;
		break;
	}

	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
		      aux_ch_name(aux_ch), port_name(port));

	return aux_ch;
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1367
/*
 * Map this port's AUX channel to the power domain that must be held for
 * AUX transfers; unknown channels fall back to AUX_A with a complaint.
 */
static enum intel_display_power_domain
intel_aux_power_domain(struct intel_dp *intel_dp)
{
	switch (intel_dp->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	default:
		MISSING_CASE(intel_dp->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
1387
/*
 * AUX control register on g4x: only channels B-D exist; anything else is
 * a bug (MISSING_CASE) and falls back to channel B.
 */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
1403
/*
 * AUX data register @index on g4x: only channels B-D exist; anything else
 * is a bug (MISSING_CASE) and falls back to channel B.
 */
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
1419
/*
 * AUX control register on ILK+: channel A lives in the CPU/north display
 * registers, channels B-D in the PCH; unknown channels fall back to A.
 */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1437
/*
 * AUX data register @index on ILK+: channel A is a CPU register, channels
 * B-D are PCH registers; unknown channels fall back to A.
 */
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1455
/*
 * AUX control register on SKL+: channels A-D and F are all valid;
 * unknown channels fall back to A.
 */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1473
/*
 * AUX data register @index on SKL+: channels A-D and F are all valid;
 * unknown channels fall back to A.
 */
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1491
/* Release resources allocated by intel_dp_aux_init() (the aux name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
 
 
1497
/*
 * One-time AUX setup for a DP encoder: resolve the AUX channel and its
 * power domain, select the platform-specific register/clock-divider/
 * send-ctl vfuncs, and register our transfer hook with the DRM DP AUX core.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	intel_dp->aux_ch = intel_aux_ch(intel_dp);
	intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);

	/* Register layout: SKL+ (gen9), PCH-split (ILK..BDW), or g4x. */
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/* Clock divider source differs per generation. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* SKL+ has its own AUX_CH_CTL layout; everything older shares g4x's. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
1539
1540bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
 
 
 
 
 
1541{
1542	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1543
1544	return max_rate >= 540000;
 
 
 
 
1545}
1546
1547static void
1548intel_dp_set_clock(struct intel_encoder *encoder,
1549		   struct intel_crtc_state *pipe_config)
1550{
1551	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1552	const struct dp_link_dpll *divisor = NULL;
1553	int i, count = 0;
1554
1555	if (IS_G4X(dev_priv)) {
1556		divisor = gen4_dpll;
1557		count = ARRAY_SIZE(gen4_dpll);
1558	} else if (HAS_PCH_SPLIT(dev_priv)) {
1559		divisor = pch_dpll;
1560		count = ARRAY_SIZE(pch_dpll);
1561	} else if (IS_CHERRYVIEW(dev_priv)) {
1562		divisor = chv_dpll;
1563		count = ARRAY_SIZE(chv_dpll);
1564	} else if (IS_VALLEYVIEW(dev_priv)) {
1565		divisor = vlv_dpll;
1566		count = ARRAY_SIZE(vlv_dpll);
1567	}
1568
1569	if (divisor && count) {
1570		for (i = 0; i < count; i++) {
1571			if (pipe_config->port_clock == divisor[i].clock) {
1572				pipe_config->dpll = divisor[i].dpll;
1573				pipe_config->clock_set = true;
1574				break;
1575			}
1576		}
1577	}
1578}
1579
/*
 * Format @nelem integers from @array into @str as a comma-separated list
 * ("1, 2, 3"), never writing more than @len bytes. On truncation or
 * snprintf error the string is left with as many complete entries as fit.
 *
 * Note: the original code compared the signed snprintf return directly
 * against the unsigned @len, relying on unsigned wraparound to catch a
 * negative (error) return; the error and truncation cases are now checked
 * explicitly.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/* stop on encoding error or once the buffer is full */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1595
/* Dump the source, sink and common link-rate lists to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the formatting work entirely unless KMS debugging is on. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1615
1616int
1617intel_dp_max_link_rate(struct intel_dp *intel_dp)
1618{
1619	int len;
1620
1621	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1622	if (WARN_ON(len <= 0))
1623		return 162000;
1624
1625	return intel_dp->common_rates[len - 1];
1626}
1627
1628int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1629{
1630	int i = intel_dp_rate_index(intel_dp->sink_rates,
1631				    intel_dp->num_sink_rates, rate);
1632
1633	if (WARN_ON(i < 0))
1634		i = 0;
1635
1636	return i;
1637}
1638
1639void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1640			   uint8_t *link_bw, uint8_t *rate_select)
1641{
1642	/* eDP 1.4 rate select method. */
1643	if (intel_dp->use_rate_select) {
1644		*link_bw = 0;
1645		*rate_select =
1646			intel_dp_rate_select(intel_dp, port_clock);
1647	} else {
1648		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1649		*rate_select = 0;
 
 
 
 
1650	}
1651}
1652
1653static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1654				struct intel_crtc_state *pipe_config)
 
1655{
1656	int bpp, bpc;
 
 
 
1657
1658	bpp = pipe_config->pipe_bpp;
1659	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1660
1661	if (bpc > 0)
1662		bpp = min(bpp, 3*bpc);
 
 
1663
1664	/* For DP Compliance we override the computed bpp for the pipe */
1665	if (intel_dp->compliance.test_data.bpc != 0) {
1666		pipe_config->pipe_bpp =	3*intel_dp->compliance.test_data.bpc;
1667		pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
1668		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
1669			      pipe_config->pipe_bpp);
1670	}
1671	return bpp;
1672}
1673
1674static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
1675				       struct drm_display_mode *m2)
1676{
1677	bool bres = false;
1678
1679	if (m1 && m2)
1680		bres = (m1->hdisplay == m2->hdisplay &&
1681			m1->hsync_start == m2->hsync_start &&
1682			m1->hsync_end == m2->hsync_end &&
1683			m1->htotal == m2->htotal &&
1684			m1->vdisplay == m2->vdisplay &&
1685			m1->vsync_start == m2->vsync_start &&
1686			m1->vsync_end == m2->vsync_end &&
1687			m1->vtotal == m2->vtotal);
1688	return bres;
1689}
1690
1691bool
1692intel_dp_compute_config(struct intel_encoder *encoder,
1693			struct intel_crtc_state *pipe_config,
1694			struct drm_connector_state *conn_state)
1695{
1696	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1697	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1698	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1699	enum port port = encoder->port;
1700	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1701	struct intel_connector *intel_connector = intel_dp->attached_connector;
1702	struct intel_digital_connector_state *intel_conn_state =
1703		to_intel_digital_connector_state(conn_state);
1704	int lane_count, clock;
1705	int min_lane_count = 1;
1706	int max_lane_count = intel_dp_max_lane_count(intel_dp);
1707	/* Conveniently, the link BW constants become indices with a shift...*/
1708	int min_clock = 0;
1709	int max_clock;
1710	int bpp, mode_rate;
1711	int link_avail, link_clock;
1712	int common_len;
1713	uint8_t link_bw, rate_select;
1714	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
1715					   DP_DPCD_QUIRK_LIMITED_M_N);
1716
1717	common_len = intel_dp_common_len_rate_limit(intel_dp,
1718						    intel_dp->max_link_rate);
1719
1720	/* No common link rates between source and sink */
1721	WARN_ON(common_len <= 0);
1722
1723	max_clock = common_len - 1;
1724
1725	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1726		pipe_config->has_pch_encoder = true;
1727
1728	pipe_config->has_drrs = false;
1729	if (IS_G4X(dev_priv) || port == PORT_A)
1730		pipe_config->has_audio = false;
1731	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1732		pipe_config->has_audio = intel_dp->has_audio;
1733	else
1734		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
1735
1736	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1737		struct drm_display_mode *panel_mode =
1738			intel_connector->panel.alt_fixed_mode;
1739		struct drm_display_mode *req_mode = &pipe_config->base.mode;
1740
1741		if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
1742			panel_mode = intel_connector->panel.fixed_mode;
1743
1744		drm_mode_debug_printmodeline(panel_mode);
1745
1746		intel_fixed_panel_mode(panel_mode, adjusted_mode);
1747
1748		if (INTEL_GEN(dev_priv) >= 9) {
1749			int ret;
1750			ret = skl_update_scaler_crtc(pipe_config);
1751			if (ret)
1752				return ret;
1753		}
1754
1755		if (HAS_GMCH_DISPLAY(dev_priv))
1756			intel_gmch_panel_fitting(intel_crtc, pipe_config,
1757						 conn_state->scaling_mode);
1758		else
1759			intel_pch_panel_fitting(intel_crtc, pipe_config,
1760						conn_state->scaling_mode);
1761	}
 
 
1762
1763	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1764	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
1765		return false;
1766
1767	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1768		return false;
1769
1770	/* Use values requested by Compliance Test Request */
1771	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1772		int index;
1773
1774		/* Validate the compliance test data since max values
1775		 * might have changed due to link train fallback.
1776		 */
1777		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1778					       intel_dp->compliance.test_lane_count)) {
1779			index = intel_dp_rate_index(intel_dp->common_rates,
1780						    intel_dp->num_common_rates,
1781						    intel_dp->compliance.test_link_rate);
1782			if (index >= 0)
1783				min_clock = max_clock = index;
1784			min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
1785		}
1786	}
1787	DRM_DEBUG_KMS("DP link computation with max lane count %i "
1788		      "max bw %d pixel clock %iKHz\n",
1789		      max_lane_count, intel_dp->common_rates[max_clock],
1790		      adjusted_mode->crtc_clock);
1791
1792	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
1793	 * bpc in between. */
1794	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1795	if (intel_dp_is_edp(intel_dp)) {
1796
1797		/* Get bpp from vbt only for panels that dont have bpp in edid */
1798		if (intel_connector->base.display_info.bpc == 0 &&
1799			(dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1800			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1801				      dev_priv->vbt.edp.bpp);
1802			bpp = dev_priv->vbt.edp.bpp;
1803		}
1804
1805		/*
1806		 * Use the maximum clock and number of lanes the eDP panel
1807		 * advertizes being capable of. The panels are generally
1808		 * designed to support only a single clock and lane
1809		 * configuration, and typically these values correspond to the
1810		 * native resolution of the panel.
1811		 */
1812		min_lane_count = max_lane_count;
1813		min_clock = max_clock;
1814	}
1815
1816	for (; bpp >= 6*3; bpp -= 2*3) {
1817		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1818						   bpp);
1819
1820		for (clock = min_clock; clock <= max_clock; clock++) {
1821			for (lane_count = min_lane_count;
1822				lane_count <= max_lane_count;
1823				lane_count <<= 1) {
1824
1825				link_clock = intel_dp->common_rates[clock];
1826				link_avail = intel_dp_max_data_rate(link_clock,
1827								    lane_count);
1828
1829				if (mode_rate <= link_avail) {
1830					goto found;
1831				}
1832			}
1833		}
1834	}
1835
1836	return false;
1837
1838found:
1839	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1840		/*
1841		 * See:
1842		 * CEA-861-E - 5.1 Default Encoding Parameters
1843		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1844		 */
1845		pipe_config->limited_color_range =
1846			bpp != 18 &&
1847			drm_default_rgb_quant_range(adjusted_mode) ==
1848			HDMI_QUANTIZATION_RANGE_LIMITED;
1849	} else {
1850		pipe_config->limited_color_range =
1851			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
1852	}
1853
1854	pipe_config->lane_count = lane_count;
1855
1856	pipe_config->pipe_bpp = bpp;
1857	pipe_config->port_clock = intel_dp->common_rates[clock];
1858
1859	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1860			      &link_bw, &rate_select);
1861
1862	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1863		      link_bw, rate_select, pipe_config->lane_count,
1864		      pipe_config->port_clock, bpp);
1865	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1866		      mode_rate, link_avail);
1867
1868	intel_link_compute_m_n(bpp, lane_count,
1869			       adjusted_mode->crtc_clock,
1870			       pipe_config->port_clock,
1871			       &pipe_config->dp_m_n,
1872			       reduce_m_n);
1873
1874	if (intel_connector->panel.downclock_mode != NULL &&
1875		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1876			pipe_config->has_drrs = true;
1877			intel_link_compute_m_n(bpp, lane_count,
1878				intel_connector->panel.downclock_mode->clock,
1879				pipe_config->port_clock,
1880				&pipe_config->dp_m2_n2,
1881				reduce_m_n);
1882	}
1883
1884	if (!HAS_DDI(dev_priv))
1885		intel_dp_set_clock(encoder, pipe_config);
1886
1887	intel_psr_compute_config(intel_dp, pipe_config);
1888
1889	return true;
1890}
1891
1892void intel_dp_set_link_params(struct intel_dp *intel_dp,
1893			      int link_rate, uint8_t lane_count,
1894			      bool link_mst)
1895{
1896	intel_dp->link_trained = false;
1897	intel_dp->link_rate = link_rate;
1898	intel_dp->lane_count = lane_count;
1899	intel_dp->link_mst = link_mst;
1900}
1901
/*
 * Compute the DP port register value (intel_dp->DP) for @pipe_config and
 * store the link parameters. The value is written to the actual port
 * register later in the enable sequence; only TRANS_DP_CTL (on CPT PCH)
 * is written here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		/* IVB CPU port A: sync polarity, framing and pipe select. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT, enhanced framing lives in TRANS_DP_CTL instead. */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / SNB CPU / VLV / CHV style register layout. */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1985
/*
 * Mask/value pairs for polling the panel power sequencer status register
 * via wait_panel_status(): panel fully on and sequencer idle, panel fully
 * off, and power-cycle delay complete.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
1996
/*
 * Poll the panel power status register until (status & @mask) == @value,
 * logging an error after a 5 second timeout. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms timeout; the error is logged but not propagated. */
	if (intel_wait_for_register(dev_priv,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
2025
/* Wait until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
2031
/* Wait until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
2037
/*
 * Ensure the panel power-cycle delay (t11_t12) has elapsed since the
 * panel was last powered off before proceeding.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
2058
/* Honor the backlight-on delay relative to the last panel power on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2064
/* Honor the backlight-off delay relative to the last backlight disable. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
2070
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* On pre-DDI hardware the register must carry the unlock key;
	 * warn if it doesn't and patch the returned value. */
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
2090
2091/*
2092 * Must be paired with edp_panel_vdd_off().
2093 * Must hold pps_mutex around the whole on/off sequence.
2094 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2095 */
2096static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2097{
2098	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2099	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2100	u32 pp;
2101	i915_reg_t pp_stat_reg, pp_ctrl_reg;
2102	bool need_to_disable = !intel_dp->want_panel_vdd;
2103
2104	lockdep_assert_held(&dev_priv->pps_mutex);
2105
2106	if (!intel_dp_is_edp(intel_dp))
2107		return false;
2108
2109	cancel_delayed_work(&intel_dp->panel_vdd_work);
2110	intel_dp->want_panel_vdd = true;
2111
2112	if (edp_have_panel_vdd(intel_dp))
2113		return need_to_disable;
2114
2115	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
2116
2117	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2118		      port_name(intel_dig_port->base.port));
2119
2120	if (!edp_have_panel_power(intel_dp))
2121		wait_panel_power_cycle(intel_dp);
2122
2123	pp = ironlake_get_pp_control(intel_dp);
2124	pp |= EDP_FORCE_VDD;
2125
2126	pp_stat_reg = _pp_stat_reg(intel_dp);
2127	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2128
2129	I915_WRITE(pp_ctrl_reg, pp);
2130	POSTING_READ(pp_ctrl_reg);
2131	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2132			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2133	/*
2134	 * If the panel wasn't on, delay before accessing aux channel
 
2135	 */
2136	if (!edp_have_panel_power(intel_dp)) {
2137		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2138			      port_name(intel_dig_port->base.port));
2139		msleep(intel_dp->panel_power_up_delay);
2140	}
2141
2142	return need_to_disable;
2143}
2144
2145/*
2146 * Must be paired with intel_edp_panel_vdd_off() or
2147 * intel_edp_panel_off().
2148 * Nested calls to these functions are not allowed since
2149 * we drop the lock. Caller must use some higher level
2150 * locking to prevent nested calls from other threads.
2151 */
2152void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2153{
2154	bool vdd;
2155
2156	if (!intel_dp_is_edp(intel_dp))
2157		return;
2158
2159	pps_lock(intel_dp);
2160	vdd = edp_panel_vdd_on(intel_dp);
2161	pps_unlock(intel_dp);
2162
2163	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2164	     port_name(dp_to_dig_port(intel_dp)->base.port));
2165}
2166
/*
 * Immediately turn the VDD override off and drop the AUX power
 * reference taken by edp_panel_vdd_on(). Caller must hold pps_mutex,
 * and want_panel_vdd must already be cleared.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* If panel power is also off, this starts the t11_t12 cycle delay. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2203
/* Delayed-work handler that drops VDD once no one wants it anymore. */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	/* Re-check under the lock: a racing vdd_on may have renewed the request. */
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
2214
2215static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2216{
2217	unsigned long delay;
2218
2219	/*
2220	 * Queue the timer to fire a long time from now (relative to the power
2221	 * down delay) to keep the panel power up across a sequence of
2222	 * operations.
2223	 */
2224	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2225	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2226}
 
 
 
 
 
 
 
 
 
2227
2228/*
2229 * Must be paired with edp_panel_vdd_on().
2230 * Must hold pps_mutex around the whole on/off sequence.
2231 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2232 */
2233static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2234{
2235	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2236
2237	lockdep_assert_held(&dev_priv->pps_mutex);
2238
2239	if (!intel_dp_is_edp(intel_dp))
2240		return;
2241
2242	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2243	     port_name(dp_to_dig_port(intel_dp)->base.port));
2244
2245	intel_dp->want_panel_vdd = false;
2246
2247	if (sync)
2248		edp_panel_vdd_off_sync(intel_dp);
2249	else
2250		edp_panel_vdd_schedule_off(intel_dp);
2251}
2252
/*
 * Turn panel power on via the power sequencer and wait until the panel
 * reports fully on. Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->base.port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->base.port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev_priv)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN5(dev_priv))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used by wait_backlight_on() for the backlight-on delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev_priv)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2299
/* Public wrapper: turn eDP panel power on under the PPS lock. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_on(intel_dp);
		pps_unlock(intel_dp);
	}
}
2309
2310
/*
 * Turn panel power off via the power sequencer and wait until the panel
 * reports fully off. Caller must hold pps_mutex and must have VDD
 * requested (the VDD power reference is released here).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->base.port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Start of the t11_t12 power cycle delay window. */
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2347
/* Public wrapper: turn eDP panel power off under the PPS lock. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (intel_dp_is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_off(intel_dp);
		pps_unlock(intel_dp);
	}
}
2357
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2385
2386/* Enable backlight PWM and backlight PP control. */
2387void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2388			    const struct drm_connector_state *conn_state)
2389{
2390	struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
2391
2392	if (!intel_dp_is_edp(intel_dp))
2393		return;
2394
2395	DRM_DEBUG_KMS("\n");
2396
2397	intel_panel_enable_backlight(crtc_state, conn_state);
2398	_intel_edp_backlight_on(intel_dp);
2399}
2400
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!intel_dp_is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the timestamp and honor the backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2426
2427/* Disable backlight PP control and backlight PWM. */
2428void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2429{
2430	struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2431
2432	if (!intel_dp_is_edp(intel_dp))
2433		return;
2434
2435	DRM_DEBUG_KMS("\n");
2436
2437	_intel_edp_backlight_off(intel_dp);
2438	intel_panel_disable_backlight(old_conn_state);
2439}
2440
2441/*
2442 * Hook for controlling the panel power control backlight through the bl_power
2443 * sysfs attribute. Take care to handle multiple calls.
2444 */
2445static void intel_edp_backlight_power(struct intel_connector *connector,
2446				      bool enable)
2447{
2448	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2449	bool is_enabled;
2450
2451	pps_lock(intel_dp);
2452	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2453	pps_unlock(intel_dp);
2454
2455	if (is_enabled == enable)
2456		return;
2457
2458	DRM_DEBUG_KMS("panel power control backlight %s\n",
2459		      enable ? "enable" : "disable");
2460
2461	if (enable)
2462		_intel_edp_backlight_on(intel_dp);
2463	else
2464		_intel_edp_backlight_off(intel_dp);
2465}
2466
/* Warn if the DP port enable bit does not match the expected @state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->base.port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2479
/* Warn if the eDP PLL enable bit in DP_A does not match the expected @state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2490
/*
 * Enable the CPU eDP PLL (port A) for the link rate in @pipe_config.
 * Must be called with the pipe, the DP port and the PLL all disabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	/* Select the PLL frequency before enabling the PLL itself. */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2530
/*
 * Disable the CPU eDP PLL (port A). Must be called with the pipe and DP
 * port disabled and the PLL still enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2549
2550static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2551{
2552	/*
2553	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2554	 * be capable of signalling downstream hpd with a long pulse.
2555	 * Whether or not that means D3 is safe to use is not clear,
2556	 * but let's assume so until proven otherwise.
2557	 *
2558	 * FIXME should really check all downstream ports...
2559	 */
2560	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2561		intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2562		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2563}
2564
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep the sink in D0 if we rely on it for downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the byte count (1) on success. */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2603
/*
 * Read out whether the DP port is enabled, and if so which pipe drives
 * it (stored in *@pipe). Returns false if the port is off or its power
 * domain is not enabled.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	u32 tmp;
	bool ret;

	if (!intel_display_power_get_if_enabled(dev_priv,
						encoder->power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe<->port mapping lives in TRANS_DP_CTL. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev_priv)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, encoder->power_domain);

	return ret;
}
2654
/*
 * Read the current hardware state of the DP port back into
 * @pipe_config: output type, audio, sync polarity, color range, lane
 * count, link M/N values and derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* On CPT the sync polarity bits live in TRANS_DP_CTL. */
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* Port A link rate comes from the eDP PLL frequency select. */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
2738
/*
 * Common DP disable path: shut down audio, then power the panel off
 * (backlight, sink DPMS, panel power) while holding vdd.  Port/PLL
 * teardown is left to the platform-specific callers/post-disable hooks.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);



	/* The link is going down; any cached training result is now stale. */
	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
2758
/*
 * g4x disable hook: after the common disable, take the link down
 * immediately -- on g4x the port must go down before the pipe does
 * (other platforms do this in their post-disable hooks instead).
 */
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);

	/* disable the port before the pipe on g4x */
	intel_dp_link_down(encoder, old_crtc_state);
}
2768
/*
 * ILK disable hook: only the common disable here; the link is taken
 * down later in ilk_post_disable_dp(), after the pipe is off.
 */
static void ilk_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
2775
/*
 * VLV/CHV disable hook: PSR must be torn down before the common
 * disable sequence runs.
 */
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_psr_disable(intel_dp, old_crtc_state);

	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
2786
/*
 * ILK post-disable: take the link down after the pipe is off, and on
 * port A (eDP) also turn off the dedicated eDP PLL.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
2800
/* VLV post-disable: just take the link down once the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
2807
/*
 * CHV post-disable: take the link down, then assert the PHY data lane
 * soft reset under the sideband lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	/* sb_lock serializes access to the PHY sideband registers. */
	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2823
/*
 * Translate the requested DPCD training pattern (dp_train_pat) into the
 * matching source-side register bits.  On DDI platforms this writes
 * DP_TP_CTL directly; on all other platforms it only updates *DP and the
 * caller is responsible for writing it to the port register.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & DP_TRAINING_PATTERN_MASK);

	if (HAS_DDI(dev_priv)) {
		/* DDI: program the transport control register directly. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		/* IVB port A / CPT PCH ports use the _CPT bit layout. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* This hardware has no TPS3 encoding; fall back. */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* g4x/VLV/CHV: legacy bit layout (CHV has a wider mask). */
		if (IS_CHERRYVIEW(dev_priv))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev_priv)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				/* Only CHV supports TPS3 here. */
				DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}


	}

}
2910
/*
 * Turn the DP port on with training pattern 1 selected, optionally with
 * audio enabled.  Note the deliberate two-step register programming for
 * VLV/CHV described in the comment below.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)




{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2933
/*
 * Common DP enable path: set up the power sequencer (VLV/CHV), enable
 * the port, power the panel up, wait for the PHY to be ready (VLV/CHV),
 * run link training, and finally enable audio if the state needs it.
 * The ordering is hardware-mandated; see the individual comments.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;

	/* The port must still be off at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		vlv_init_panel_power_sequencer(encoder, pipe_config);

	intel_dp_enable_port(intel_dp, pipe_config);

	/* Power the panel up while holding vdd. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		/* CHV can skip waiting on lanes the config doesn't use. */
		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
2980
/* g4x/ILK enable hook: common enable, then turn the backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)



{
	intel_enable_dp(encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);


}
2988
/*
 * VLV/CHV enable hook: the port itself was already enabled in the
 * pre-enable hooks (intel_enable_dp() runs there); here we only turn
 * the backlight on and kick off PSR.
 */
static void vlv_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(pipe_config, conn_state);
	intel_psr_enable(intel_dp, pipe_config);
}
2998
/*
 * g4x/ILK pre-enable: program the port register state, and on port A
 * (eDP) turn on the dedicated eDP PLL before the pipe is enabled.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);


	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp, pipe_config);
}
3012
/*
 * Disconnect this port from its currently-assigned power sequencer:
 * sync vdd off, clear the PPS port select, and mark pps_pipe invalid.
 * Must only be called while the port is inactive (active_pipe check).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)


{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);


	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3043
/*
 * Detach the given pipe's power sequencer from whichever DP/eDP encoder
 * currently owns it, so the caller can claim it.  Warns if the sequencer
 * is being stolen from a port that is still active.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only DP/eDP encoders can own a power sequencer. */
		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->base.port;

		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3076
/*
 * Bind the power sequencer of the crtc's pipe to this port: release any
 * previously-held sequencer, steal the target one from other encoders
 * if needed, and (for eDP only) initialize its registers.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* External DP doesn't need a panel power sequencer. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3119
/* VLV pre-enable: bring up the PHY, then run the common enable path. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);
}
3128
/* VLV pre-PLL: program the port register state, then prep the PHY. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3137
/*
 * CHV pre-enable: bring up the PHY, run the common enable path, then
 * drop the CL2 override so the common lane keeps itself powered.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
3149
/* CHV pre-PLL: program the port register state, then prep the PHY. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
3158
/* CHV post-PLL-disable: finish tearing down the PHY. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3165
3166/*
3167 * Fetch AUX CH registers 0x202 - 0x207 which contain
3168 * link status information
3169 */
3170bool
3171intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3172{
3173	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3174				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3175}
3176
3177/* These are source-specific values. */
3178uint8_t
3179intel_dp_voltage_max(struct intel_dp *intel_dp)
3180{
3181	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3182	enum port port = dp_to_dig_port(intel_dp)->base.port;
3183
3184	if (INTEL_GEN(dev_priv) >= 9) {
3185		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3186		return intel_ddi_dp_voltage_max(encoder);
3187	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3188		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3189	else if (IS_GEN7(dev_priv) && port == PORT_A)
3190		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3191	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3192		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3193	else
3194		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3195}
3196
/*
 * Maximum pre-emphasis level the source supports for a given voltage
 * swing, per platform/port.  The table values are platform-specific;
 * keep them in sync with the signal level functions below.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (INTEL_GEN(dev_priv) >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3264
/*
 * Program the VLV PHY with the de-emphasis/pre-emphasis/transscale tuning
 * values matching the requested training set.  The magic constants are
 * platform tuning values; do not "simplify" them.  Returns 0 because the
 * levels are written to the PHY here rather than into the DP register.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)

{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];




	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3350
/*
 * Program the CHV PHY with the de-emphasis/margin values matching the
 * requested training set.  uniq_trans_scale is only needed at the
 * maximum swing/no-pre-emphasis point.  Returns 0 because the levels
 * are written to the PHY here rather than into the DP register.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;

}
3433
3434static uint32_t
3435gen4_signal_levels(uint8_t train_set)
3436{
3437	uint32_t	signal_levels = 0;
3438
3439	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3440	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3441	default:
3442		signal_levels |= DP_VOLTAGE_0_4;
3443		break;
3444	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3445		signal_levels |= DP_VOLTAGE_0_6;
3446		break;
3447	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3448		signal_levels |= DP_VOLTAGE_0_8;
3449		break;
3450	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3451		signal_levels |= DP_VOLTAGE_1_2;
3452		break;
3453	}
3454	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3455	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3456	default:
3457		signal_levels |= DP_PRE_EMPHASIS_0;
3458		break;
3459	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3460		signal_levels |= DP_PRE_EMPHASIS_3_5;
3461		break;
3462	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3463		signal_levels |= DP_PRE_EMPHASIS_6;
3464		break;
3465	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3466		signal_levels |= DP_PRE_EMPHASIS_9_5;
3467		break;
3468	}
3469	return signal_levels;
3470}
3471
3472/* Gen6's DP voltage swing and pre-emphasis control */
3473static uint32_t
3474gen6_edp_signal_levels(uint8_t train_set)
3475{
3476	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3477					 DP_TRAIN_PRE_EMPHASIS_MASK);
3478	switch (signal_levels) {
3479	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3480	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3481		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3482	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3483		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3484	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3485	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3486		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3487	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3488	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3489		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3490	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3491	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3492		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3493	default:
3494		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3495			      "0x%x\n", signal_levels);
3496		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3497	}
3498}
3499
3500/* Gen7's DP voltage swing and pre-emphasis control */
3501static uint32_t
3502gen7_edp_signal_levels(uint8_t train_set)
3503{
3504	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3505					 DP_TRAIN_PRE_EMPHASIS_MASK);
3506	switch (signal_levels) {
3507	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3508		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3509	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3510		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3511	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3512		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3513
3514	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3515		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3516	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3517		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3518
3519	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3520		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3521	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3522		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3523
3524	default:
3525		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3526			      "0x%x\n", signal_levels);
3527		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3528	}
3529}
3530
/*
 * Pick the platform-appropriate signal level encoding for the current
 * training set and write it into the port register.  On VLV/CHV and
 * BXT/CNL the levels are programmed into the PHY by the helper instead,
 * which is why those paths leave mask == 0.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)

{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) {
		signal_levels = bxt_signal_levels(intel_dp);
	} else if (HAS_DDI(dev_priv)) {
		signal_levels = ddi_signal_levels(intel_dp);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev_priv) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	/* Only meaningful when levels go into the port register. */
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3574
/*
 * Update intel_dp->DP with the requested training pattern bits and
 * write the result to the port register (on DDI platforms the helper
 * writes DP_TP_CTL itself).
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)



{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);





}
3588
3589void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 
 
 
3590{
3591	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3592	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3593	enum port port = intel_dig_port->base.port;
3594	uint32_t val;
3595
3596	if (!HAS_DDI(dev_priv))
3597		return;
3598
3599	val = I915_READ(DP_TP_CTL(port));
3600	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3601	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3602	I915_WRITE(DP_TP_CTL(port), val);
 
 
 
 
 
3603
3604	/*
3605	 * On PORT_A we can have only eDP in SST mode. There the only reason
3606	 * we need to set idle transmission mode is to work around a HW issue
3607	 * where we enable the pipe while not in idle link-training mode.
3608	 * In this case there is requirement to wait for a minimum number of
3609	 * idle patterns to be sent.
3610	 */
3611	if (port == PORT_A)
3612		return;
3613
3614	if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3615				    DP_TP_STATUS_IDLE_DONE,
3616				    DP_TP_STATUS_IDLE_DONE,
3617				    1))
3618		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3619}
3620
 
/*
 * Take the (non-DDI) DP link down: switch to the idle pattern, then
 * clear the port enable, and apply the IBX transcoder workaround where
 * needed.  The statement ordering here is hardware-mandated.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	enum port port = encoder->port;




	uint32_t DP = intel_dp->DP;

	/* DDI ports are taken down elsewhere. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop to the idle training pattern... */
	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* ...then disable the port (and audio) entirely. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Respect the panel's required power-down delay. */
	msleep(intel_dp->panel_power_down_delay);




	intel_dp->DP = DP;


















	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		pps_lock(intel_dp);
		intel_dp->active_pipe = INVALID_PIPE;
		pps_unlock(intel_dp);
	}
}
3695
3696bool
3697intel_dp_read_dpcd(struct intel_dp *intel_dp)
3698{
3699	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3700			     sizeof(intel_dp->dpcd)) < 0)
3701		return false; /* aux transfer failed */
3702
3703	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
 
 
3704
3705	return intel_dp->dpcd[DP_DPCD_REV] != 0;
3706}
 
 
3707
/*
 * One-time eDP DPCD initialization: read the base DPCD, the branch/sink
 * descriptor, PSR capabilities, the eDP display control registers and
 * (for eDP 1.4+) the table of supported link rates, then compute the
 * source/sink common rates.  Returns false if the base DPCD read failed.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/* DPCD 1.1+ advertises whether AUX handshake-less training works. */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	intel_psr_init_dpcd(intel_dp);

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The table is zero-terminated (when not full). */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	return true;
}
3782
3783
/*
 * Re-read and cache the sink's DPCD, sink count and (for branch devices)
 * the per-port downstream info.  Returns false when a full re-detect is
 * warranted: read failure, or a dongle with no downstream display.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	u8 sink_count;

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/* Don't clobber cached eDP rates. */
	if (!intel_dp_is_edp(intel_dp)) {
		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3831
3832static bool
3833intel_dp_can_mst(struct intel_dp *intel_dp)
3834{
3835	u8 mstm_cap;
3836
3837	if (!i915_modparams.enable_dp_mst)
3838		return false;
3839
3840	if (!intel_dp->can_mst)
3841		return false;
3842
3843	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3844		return false;
3845
3846	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
3847		return false;
3848
3849	return mstm_cap & DP_MST_CAP;
3850}
3851
3852static void
3853intel_dp_configure_mst(struct intel_dp *intel_dp)
3854{
3855	if (!i915_modparams.enable_dp_mst)
3856		return;
 
 
 
 
3857
3858	if (!intel_dp->can_mst)
3859		return;
 
 
 
 
 
3860
3861	intel_dp->is_mst = intel_dp_can_mst(intel_dp);
 
 
 
 
3862
3863	if (intel_dp->is_mst)
3864		DRM_DEBUG_KMS("Sink is MST capable\n");
3865	else
3866		DRM_DEBUG_KMS("Sink is not MST capable\n");
 
 
 
3867
3868	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3869					intel_dp->is_mst);
3870}
 
3871
/*
 * Stop the sink's CRC calculation (clear DP_TEST_SINK_START) and wait for
 * its test counter to drain back to zero.  @disable_wa selects whether the
 * IPS disable done by intel_dp_sink_crc_start() should be undone on exit.
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state, bool disable_wa)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the start bit, preserving the other DP_TEST_SINK bits. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait (one vblank per attempt) for the test count to reach zero. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	if (disable_wa)
		hsw_enable_ips(crtc_state);
	return ret;
}
3917
/*
 * Start the sink's CRC calculation: verify CRC test support, stop any
 * calculation already in flight, disable IPS for the duration of the
 * test, then set DP_TEST_SINK_START and wait a vblank for it to take
 * effect.  Returns 0 on success or a negative error code.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A calculation is already running; stop it first (IPS untouched). */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
		if (ret)
			return ret;
	}

	hsw_disable_ips(crtc_state);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Undo the IPS workaround on failure. */
		hsw_enable_ips(crtc_state);
		return -EIO;
	}

	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return 0;
}
3953
/*
 * Read a 6-byte sink CRC (starting at DP_TEST_CRC_R_CR) into @crc.
 * Starts the sink CRC calculation, waits up to 6 vblanks for the sink's
 * test counter to become non-zero, reads the CRC, and always stops the
 * calculation again.  Returns 0 on success or a negative error code.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports a computed CRC. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
	return ret;
}
3994
3995static bool
3996intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3997{
3998	return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
3999				 sink_irq_vector) == 1;
4000}
4001
4002static bool
4003intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4004{
4005	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4006				sink_irq_vector, DP_DPRX_ESI_LEN) ==
4007		DP_DPRX_ESI_LEN;
4008}
4009
/*
 * Handle a LINK_TRAINING automated test request: read and validate the
 * requested lane count and link rate, caching them in
 * intel_dp->compliance for the subsequent modeset.  Returns DP_TEST_ACK
 * on success, DP_TEST_NAK otherwise.
 */
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	uint8_t test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4046
/*
 * Handle a TEST_PATTERN automated test request.  Only the color-ramp
 * pattern in RGB (non-CEA dynamic range) at 6 or 8 bpc is accepted; on
 * success the requested pattern, resolution and bpc are cached in
 * intel_dp->compliance.test_data and the test is marked active.
 */
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	uint8_t test_pattern;
	uint8_t test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color ramp pattern is supported. */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* Requested width/height are big-endian 16-bit values in the DPCD. */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB color format with non-CEA dynamic range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
4107
/*
 * Handle an EDID-read automated test request: on a clean EDID read,
 * write the checksum of the last EDID block back to the sink and request
 * the preferred resolution; on NACKs/DEFERs/corruption fall back to the
 * failsafe resolution.  Always marks the compliance test active.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4151
4152static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4153{
4154	uint8_t test_result = DP_TEST_NAK;
4155	return test_result;
4156}
4157
4158static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4159{
4160	uint8_t response = DP_TEST_NAK;
4161	uint8_t request = 0;
4162	int status;
4163
4164	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4165	if (status <= 0) {
4166		DRM_DEBUG_KMS("Could not read test request from sink\n");
4167		goto update_status;
4168	}
4169
4170	switch (request) {
4171	case DP_TEST_LINK_TRAINING:
4172		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4173		response = intel_dp_autotest_link_training(intel_dp);
4174		break;
4175	case DP_TEST_LINK_VIDEO_PATTERN:
4176		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4177		response = intel_dp_autotest_video_pattern(intel_dp);
4178		break;
4179	case DP_TEST_LINK_EDID_READ:
4180		DRM_DEBUG_KMS("EDID test requested\n");
4181		response = intel_dp_autotest_edid(intel_dp);
4182		break;
4183	case DP_TEST_LINK_PHY_TEST_PATTERN:
4184		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4185		response = intel_dp_autotest_phy_pattern(intel_dp);
4186		break;
4187	default:
4188		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4189		break;
4190	}
4191
4192	if (response & DP_TEST_ACK)
4193		intel_dp->compliance.test_type = request;
4194
4195update_status:
4196	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4197	if (status <= 0)
4198		DRM_DEBUG_KMS("Could not write test response to sink\n");
4199}
4200
/*
 * Service an MST short-pulse interrupt: read the ESI (event status
 * indicator) block, retrain the link if channel EQ was lost, then loop
 * handing IRQs to the MST topology manager (acking each batch) until no
 * more events are pending.  If the ESI read fails the device is assumed
 * gone: MST mode is torn down and a hotplug event is sent.
 * Returns the topology manager's result, or -EINVAL when not in MST mode
 * or when the initial ESI read failed.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Re-read the ESI in case more events arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4257
4258static bool
4259intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4260{
4261	u8 link_status[DP_LINK_STATUS_SIZE];
4262
4263	if (!intel_dp->link_trained)
4264		return false;
4265
4266	if (!intel_dp_get_link_status(intel_dp, link_status))
4267		return false;
4268
4269	/*
4270	 * Validate the cached values of intel_dp->link_rate and
4271	 * intel_dp->lane_count before attempting to retrain.
4272	 */
4273	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4274					intel_dp->lane_count))
4275		return false;
4276
4277	/* Retrain if Channel EQ or CR not ok */
4278	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4279}
4280
4281/*
4282 * If display is now connected check links status,
4283 * there has been known issues of link loss triggering
4284 * long pulse.
4285 *
4286 * Some sinks (eg. ASUS PB287Q) seem to perform some
4287 * weird HPD ping pong during modesets. So we can apparently
4288 * end up with HPD going low during a modeset, and then
4289 * going back up soon after. And once that happens we must
4290 * retrain the link to get a picture. That's in case no
4291 * userspace component reacted to intermittent HPD dip.
4292 */
/*
 * Retrain the link for a connected SST DP connector if needed, taking
 * the required modeset locks through @ctx.  Lock acquisition failures
 * (including -EDEADLK) are returned to the caller, who owns the backoff
 * and unlock via the acquire context.  Returns 0 when nothing needed
 * doing or retraining completed.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->base.active)
		return 0;

	/* Don't race a commit that is still being applied. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}
4357
4358/*
4359 * If display is now connected check links status,
4360 * there has been known issues of link loss triggering
4361 * long pulse.
4362 *
4363 * Some sinks (eg. ASUS PB287Q) seem to perform some
4364 * weird HPD ping pong during modesets. So we can apparently
4365 * end up with HPD going low during a modeset, and then
4366 * going back up soon after. And once that happens we must
4367 * retrain the link to get a picture. That's in case no
4368 * userspace component reacted to intermittent HPD dip.
4369 */
4370static bool intel_dp_hotplug(struct intel_encoder *encoder,
4371			     struct intel_connector *connector)
4372{
4373	struct drm_modeset_acquire_ctx ctx;
4374	bool changed;
4375	int ret;
4376
4377	changed = intel_encoder_hotplug(encoder, connector);
4378
4379	drm_modeset_acquire_init(&ctx, 0);
4380
4381	for (;;) {
4382		ret = intel_dp_retrain_link(encoder, &ctx);
4383
4384		if (ret == -EDEADLK) {
4385			drm_modeset_backoff(&ctx);
4386			continue;
4387		}
4388
4389		break;
4390	}
4391
4392	drm_modeset_drop_locks(&ctx);
4393	drm_modeset_acquire_fini(&ctx);
4394	WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
4395
4396	return changed;
4397}
4398
4399/*
4400 * According to DP spec
4401 * 5.1.2:
4402 *  1. Read DPCD
4403 *  2. Configure link according to Receiver Capabilities
4404 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4405 *  4. Check link status on receipt of hot-plug interrupt
4406 *
4407 * intel_dp_short_pulse -  handles short pulse interrupts
4408 * when full detection is not required.
4409 * Returns %true if short pulse is handled and full detection
4410 * is NOT required and %false otherwise.
4411 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u8 sink_irq_vector = 0;
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		/* Automated test requests are handled here; CP and
		 * sink-specific IRQs are only logged. */
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
4466
4467/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD alone (no HPD line): handles
 * eDP, branch devices with and without HPD-capable downstream ports, and
 * falls back to a DDC probe for legacy dongles.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	/* Make sure an active LSPCON is back in a usable mode first. */
	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* eDP panels are always connected once the DPCD read succeeds. */
	if (intel_dp_is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream-port type is known. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4521
4522static enum drm_connector_status
4523edp_detect(struct intel_dp *intel_dp)
4524{
4525	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
4526	enum drm_connector_status status;
4527
4528	status = intel_panel_detect(dev_priv);
4529	if (status == connector_status_unknown)
4530		status = connector_status_connected;
4531
4532	return status;
4533}
4534
4535static bool ibx_digital_port_connected(struct intel_encoder *encoder)
4536{
4537	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4538	u32 bit;
4539
4540	switch (encoder->hpd_pin) {
4541	case HPD_PORT_B:
4542		bit = SDE_PORTB_HOTPLUG;
4543		break;
4544	case HPD_PORT_C:
4545		bit = SDE_PORTC_HOTPLUG;
4546		break;
4547	case HPD_PORT_D:
4548		bit = SDE_PORTD_HOTPLUG;
4549		break;
4550	default:
4551		MISSING_CASE(encoder->hpd_pin);
4552		return false;
4553	}
4554
4555	return I915_READ(SDEISR) & bit;
4556}
4557
4558static bool cpt_digital_port_connected(struct intel_encoder *encoder)
 
4559{
4560	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4561	u32 bit;
 
4562
4563	switch (encoder->hpd_pin) {
4564	case HPD_PORT_B:
4565		bit = SDE_PORTB_HOTPLUG_CPT;
4566		break;
4567	case HPD_PORT_C:
4568		bit = SDE_PORTC_HOTPLUG_CPT;
4569		break;
4570	case HPD_PORT_D:
4571		bit = SDE_PORTD_HOTPLUG_CPT;
4572		break;
4573	default:
4574		MISSING_CASE(encoder->hpd_pin);
4575		return false;
4576	}
4577
4578	return I915_READ(SDEISR) & bit;
4579}
4580
4581static bool spt_digital_port_connected(struct intel_encoder *encoder)
4582{
4583	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4584	u32 bit;
4585
4586	switch (encoder->hpd_pin) {
4587	case HPD_PORT_A:
4588		bit = SDE_PORTA_HOTPLUG_SPT;
4589		break;
4590	case HPD_PORT_E:
4591		bit = SDE_PORTE_HOTPLUG_SPT;
4592		break;
4593	default:
4594		return cpt_digital_port_connected(encoder);
4595	}
4596
4597	return I915_READ(SDEISR) & bit;
4598}
4599
4600static bool g4x_digital_port_connected(struct intel_encoder *encoder)
4601{
4602	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4603	u32 bit;
4604
4605	switch (encoder->hpd_pin) {
4606	case HPD_PORT_B:
4607		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4608		break;
4609	case HPD_PORT_C:
4610		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4611		break;
4612	case HPD_PORT_D:
4613		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4614		break;
4615	default:
4616		MISSING_CASE(encoder->hpd_pin);
4617		return false;
4618	}
4619
4620	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4621}
4622
4623static bool gm45_digital_port_connected(struct intel_encoder *encoder)
4624{
4625	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4626	u32 bit;
4627
4628	switch (encoder->hpd_pin) {
4629	case HPD_PORT_B:
4630		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4631		break;
4632	case HPD_PORT_C:
4633		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4634		break;
4635	case HPD_PORT_D:
4636		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4637		break;
4638	default:
4639		MISSING_CASE(encoder->hpd_pin);
4640		return false;
4641	}
4642
4643	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4644}
4645
4646static bool ilk_digital_port_connected(struct intel_encoder *encoder)
4647{
4648	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4649
4650	if (encoder->hpd_pin == HPD_PORT_A)
4651		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
4652	else
4653		return ibx_digital_port_connected(encoder);
4654}
4655
4656static bool snb_digital_port_connected(struct intel_encoder *encoder)
4657{
4658	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4659
4660	if (encoder->hpd_pin == HPD_PORT_A)
4661		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
4662	else
4663		return cpt_digital_port_connected(encoder);
4664}
4665
4666static bool ivb_digital_port_connected(struct intel_encoder *encoder)
4667{
4668	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4669
4670	if (encoder->hpd_pin == HPD_PORT_A)
4671		return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
4672	else
4673		return cpt_digital_port_connected(encoder);
4674}
4675
4676static bool bdw_digital_port_connected(struct intel_encoder *encoder)
4677{
4678	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4679
4680	if (encoder->hpd_pin == HPD_PORT_A)
4681		return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
4682	else
4683		return cpt_digital_port_connected(encoder);
4684}
4685
4686static bool bxt_digital_port_connected(struct intel_encoder *encoder)
4687{
4688	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4689	u32 bit;
4690
4691	switch (encoder->hpd_pin) {
4692	case HPD_PORT_A:
4693		bit = BXT_DE_PORT_HP_DDIA;
4694		break;
4695	case HPD_PORT_B:
4696		bit = BXT_DE_PORT_HP_DDIB;
4697		break;
4698	case HPD_PORT_C:
4699		bit = BXT_DE_PORT_HP_DDIC;
4700		break;
4701	default:
4702		MISSING_CASE(encoder->hpd_pin);
4703		return false;
4704	}
4705
4706	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4707}
4708
/**
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* GMCH platforms read live state from the PORT_HOTPLUG_STAT side. */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (IS_GM45(dev_priv))
			return gm45_digital_port_connected(encoder);
		else
			return g4x_digital_port_connected(encoder);
	}

	/* Dispatch by generation; SPT+ is the final catch-all. */
	if (IS_GEN5(dev_priv))
		return ilk_digital_port_connected(encoder);
	else if (IS_GEN6(dev_priv))
		return snb_digital_port_connected(encoder);
	else if (IS_GEN7(dev_priv))
		return ivb_digital_port_connected(encoder);
	else if (IS_GEN8(dev_priv))
		return bdw_digital_port_connected(encoder);
	else if (IS_GEN9_LP(dev_priv))
		return bxt_digital_port_connected(encoder);
	else
		return spt_digital_port_connected(encoder);
}
4739
4740static struct edid *
4741intel_dp_get_edid(struct intel_dp *intel_dp)
4742{
4743	struct intel_connector *intel_connector = intel_dp->attached_connector;
4744
4745	/* use cached edid if we have one */
4746	if (intel_connector->edid) {
4747		/* invalid edid */
4748		if (IS_ERR(intel_connector->edid))
4749			return NULL;
4750
4751		return drm_edid_duplicate(intel_connector->edid);
4752	} else
4753		return drm_get_edid(&intel_connector->base,
4754				    &intel_dp->aux.ddc);
4755}
4756
4757static void
4758intel_dp_set_edid(struct intel_dp *intel_dp)
4759{
4760	struct intel_connector *intel_connector = intel_dp->attached_connector;
4761	struct edid *edid;
4762
4763	intel_dp_unset_edid(intel_dp);
4764	edid = intel_dp_get_edid(intel_dp);
4765	intel_connector->detect_edid = edid;
4766
4767	intel_dp->has_audio = drm_detect_monitor_audio(edid);
4768}
4769
4770static void
4771intel_dp_unset_edid(struct intel_dp *intel_dp)
4772{
4773	struct intel_connector *intel_connector = intel_dp->attached_connector;
4774
4775	kfree(intel_connector->detect_edid);
4776	intel_connector->detect_edid = NULL;
4777
4778	intel_dp->has_audio = false;
4779}
4780
/*
 * Full connector probe, run for a long HPD pulse: determine connection
 * status, (re)read link parameters and EDID, and service any pending sink
 * IRQ. Caller must hold connection_mutex; returns a connector_status value.
 */
static int
intel_dp_long_pulse(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	enum drm_connector_status status;
	u8 sink_irq_vector = 0;

	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	/* All the AUX traffic below requires the AUX power domain. */
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget any in-progress compliance test state. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

		if (intel_dp->is_mst) {
			/* Sink is gone: tear down the MST topology. */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Re-derive link limits only after a reset (hotplug/resume). */
	if (intel_dp->reset_link_params) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
		status = connector_status_connected;
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	/* Keep the cached EDID only while connected (or in MST mode). */
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
	return status;
}
4878
/*
 * ->detect_ctx() hook. A full probe (long pulse) is only redone when
 * detect_done has been cleared (e.g. by the HPD handler); otherwise the
 * cached connector status is returned. May return a negative error code
 * from drm_modeset_lock() for deadlock backoff.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int status = connector->status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	/* If full detect is not performed yet, do a full detect */
	if (!intel_dp->detect_done) {
		struct drm_crtc *crtc;
		int ret;

		crtc = connector->state->crtc;
		if (crtc) {
			/* Needed for a consistent look at the active state. */
			ret = drm_modeset_lock(&crtc->mutex, ctx);
			if (ret)
				return ret;
		}

		status = intel_dp_long_pulse(intel_dp->attached_connector);
	}

	/* Force a fresh probe on the next detect. */
	intel_dp->detect_done = false;

	return status;
}
4909
/*
 * ->force() hook: re-read the EDID without a hotplug event. Only probes
 * when the connector was last seen connected, since the AUX channel needs
 * its power domain to be usable.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
4930
4931static int intel_dp_get_modes(struct drm_connector *connector)
4932{
4933	struct intel_connector *intel_connector = to_intel_connector(connector);
4934	struct edid *edid;
 
4935
4936	edid = intel_connector->detect_edid;
4937	if (edid) {
4938		int ret = intel_connector_update_modes(connector, edid);
4939		if (ret)
4940			return ret;
4941	}
4942
4943	/* if eDP has no EDID, fall back to fixed mode */
4944	if (intel_dp_is_edp(intel_attached_dp(connector)) &&
4945	    intel_connector->panel.fixed_mode) {
4946		struct drm_display_mode *mode;
4947
4948		mode = drm_mode_duplicate(connector->dev,
4949					  intel_connector->panel.fixed_mode);
4950		if (mode) {
4951			drm_mode_probed_add(connector, mode);
4952			return 1;
4953		}
4954	}
4955
4956	return 0;
4957}
4958
4959static int
4960intel_dp_connector_register(struct drm_connector *connector)
 
 
4961{
 
4962	struct intel_dp *intel_dp = intel_attached_dp(connector);
4963	int ret;
4964
4965	ret = intel_connector_register(connector);
4966	if (ret)
4967		return ret;
4968
4969	i915_debugfs_connector_add(connector);
 
 
4970
4971	DRM_DEBUG_KMS("registering %s bus for %s\n",
4972		      intel_dp->aux.name, connector->kdev->kobj.name);
4973
4974	intel_dp->aux.dev = connector->kdev;
4975	return drm_dp_aux_register(&intel_dp->aux);
4976}
4977
4978static void
4979intel_dp_connector_unregister(struct drm_connector *connector)
4980{
4981	drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4982	intel_connector_unregister(connector);
4983}
4984
4985static void
4986intel_dp_connector_destroy(struct drm_connector *connector)
4987{
4988	struct intel_connector *intel_connector = to_intel_connector(connector);
4989
4990	kfree(intel_connector->detect_edid);
4991
4992	if (!IS_ERR_OR_NULL(intel_connector->edid))
4993		kfree(intel_connector->edid);
4994
4995	/*
4996	 * Can't call intel_dp_is_edp() since the encoder may have been
4997	 * destroyed already.
4998	 */
4999	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5000		intel_panel_fini(&intel_connector->panel);
5001
5002	drm_connector_cleanup(connector);
5003	kfree(connector);
5004}
5005
/*
 * Encoder ->destroy() hook: tear down MST, make sure eDP VDD is really
 * off, drop the reboot notifier and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);

	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
5033
/* Suspend hook: force eDP VDD off so it isn't left on across suspend. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
5050
/*
 * Write An over DPCD and then trigger the hardware to transmit the Aksv.
 * The Aksv value itself is never visible to software; the AKSV_SELECT
 * flag makes the AUX hardware substitute it for the payload. Returns 0 on
 * success or a negative error code.
 */
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
				u8 *an)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
	static const struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_WRITE,
		.address = DP_AUX_HDCP_AKSV,
		.size = DRM_HDCP_KSV_LEN,
	};
	uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
	ssize_t dpcd_ret;
	int ret;

	/* Output An first, that's easy */
	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
				     an, DRM_HDCP_AN_LEN);
	if (dpcd_ret != DRM_HDCP_AN_LEN) {
		DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
	}

	/*
	 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
	 * order to get it on the wire, we need to create the AUX header as if
	 * we were writing the data, and then tickle the hardware to output the
	 * data once the header is sent out.
	 */
	intel_dp_aux_header(txbuf, &msg);

	ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
				rxbuf, sizeof(rxbuf),
				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
	if (ret < 0) {
		DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
		return ret;
	} else if (ret == 0) {
		DRM_ERROR("Aksv write over DP/AUX was empty\n");
		return -EIO;
	}

	/* The sink must ACK the native AUX write for success. */
	reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
	return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
}
5095
5096static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5097				   u8 *bksv)
5098{
5099	ssize_t ret;
5100	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5101			       DRM_HDCP_KSV_LEN);
5102	if (ret != DRM_HDCP_KSV_LEN) {
5103		DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
5104		return ret >= 0 ? -EIO : ret;
5105	}
5106	return 0;
5107}
5108
5109static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5110				      u8 *bstatus)
5111{
5112	ssize_t ret;
5113	/*
5114	 * For some reason the HDMI and DP HDCP specs call this register
5115	 * definition by different names. In the HDMI spec, it's called BSTATUS,
5116	 * but in DP it's called BINFO.
5117	 */
5118	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5119			       bstatus, DRM_HDCP_BSTATUS_LEN);
5120	if (ret != DRM_HDCP_BSTATUS_LEN) {
5121		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
5122		return ret >= 0 ? -EIO : ret;
5123	}
5124	return 0;
5125}
5126
5127static
5128int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5129			     u8 *bcaps)
5130{
5131	ssize_t ret;
5132
5133	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5134			       bcaps, 1);
5135	if (ret != 1) {
5136		DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
5137		return ret >= 0 ? -EIO : ret;
5138	}
5139
5140	return 0;
 
 
5141}
5142
5143static
5144int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5145				   bool *repeater_present)
5146{
5147	ssize_t ret;
5148	u8 bcaps;
5149
5150	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5151	if (ret)
5152		return ret;
5153
5154	*repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5155	return 0;
5156}
5157
5158static
5159int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5160				u8 *ri_prime)
5161{
5162	ssize_t ret;
5163	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5164			       ri_prime, DRM_HDCP_RI_LEN);
5165	if (ret != DRM_HDCP_RI_LEN) {
5166		DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
5167		return ret >= 0 ? -EIO : ret;
5168	}
5169	return 0;
5170}
5171
5172static
5173int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5174				 bool *ksv_ready)
5175{
5176	ssize_t ret;
5177	u8 bstatus;
5178	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5179			       &bstatus, 1);
5180	if (ret != 1) {
5181		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
5182		return ret >= 0 ? -EIO : ret;
5183	}
5184	*ksv_ready = bstatus & DP_BSTATUS_READY;
5185	return 0;
5186}
5187
5188static
5189int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5190				int num_downstream, u8 *ksv_fifo)
5191{
5192	ssize_t ret;
5193	int i;
5194
5195	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5196	for (i = 0; i < num_downstream; i += 3) {
5197		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5198		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5199				       DP_AUX_HDCP_KSV_FIFO,
5200				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
5201				       len);
5202		if (ret != len) {
5203			DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
5204				  ret);
5205			return ret >= 0 ? -EIO : ret;
5206		}
5207	}
5208	return 0;
5209}
5210
5211static
5212int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5213				    int i, u32 *part)
5214{
5215	ssize_t ret;
5216
5217	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5218		return -EINVAL;
5219
5220	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5221			       DP_AUX_HDCP_V_PRIME(i), part,
5222			       DRM_HDCP_V_PRIME_PART_LEN);
5223	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5224		DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5225		return ret >= 0 ? -EIO : ret;
5226	}
5227	return 0;
5228}
5229
5230static
5231int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5232				    bool enable)
5233{
5234	/* Not used for single stream DisplayPort setups */
5235	return 0;
5236}
5237
5238static
5239bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5240{
5241	ssize_t ret;
5242	u8 bstatus;
5243
5244	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5245			       &bstatus, 1);
5246	if (ret != 1) {
5247		DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
5248		return false;
5249	}
5250
5251	return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5252}
5253
5254static
5255int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5256			  bool *hdcp_capable)
5257{
5258	ssize_t ret;
5259	u8 bcaps;
5260
5261	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5262	if (ret)
5263		return ret;
5264
5265	*hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5266	return 0;
5267}
5268
/* DP/AUX-backed implementation of the HDCP transport operations. */
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
};
5282
/*
 * Reconcile the driver's VDD state tracking with whatever the BIOS left
 * enabled at boot/resume. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5303
5304static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
5305{
5306	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5307
5308	if ((intel_dp->DP & DP_PORT_EN) == 0)
5309		return INVALID_PIPE;
5310
5311	if (IS_CHERRYVIEW(dev_priv))
5312		return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5313	else
5314		return PORT_TO_PIPE(intel_dp->DP);
5315}
5316
/*
 * Encoder ->reset() hook, called on driver load and resume: re-sync
 * software state (DP register cache, LSPCON, PPS) with the hardware.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

	/* On DDI platforms the port register isn't directly mirrored. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	/* Force link parameters to be re-derived on the next detect. */
	intel_dp->reset_link_params = true;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	if (intel_dp_is_edp(intel_dp)) {
		/* Reinit the power sequencer, in case BIOS did something with it. */
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	pps_unlock(intel_dp);
}
5344
/* Connector vtable; detection itself goes through ->detect_ctx (helper). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
5356
/* Probe-helper vtable: context-aware detect, mode list and validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};
5363
/* Encoder vtable: reset on load/resume, destroy on teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5368
/*
 * HPD IRQ handler for a DP digital port. Long pulses defer the actual
 * probe to the hotplug work (IRQ_NONE); short pulses are serviced inline
 * (link status, MST sideband, HDCP link check). Returns IRQ_HANDLED when
 * no further hotplug processing is needed.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum irqreturn ret = IRQ_NONE;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		/* IRQ_NONE lets the hotplug work run a full detect. */
		intel_dp->reset_link_params = true;
		intel_dp->detect_done = false;
		return IRQ_NONE;
	}

	/* Short pulse handling needs AUX access. */
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		/* Short pulse can signify loss of hdcp authentication */
		intel_hdcp_check_link(intel_dp->attached_connector);

		if (!handled) {
			/* Fall back to a full detect via hotplug work. */
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);

	return ret;
}
5437
5438/* check the VBT to see whether the eDP is on another port */
5439bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
5440{
5441	/*
5442	 * eDP not supported on g4x. so bail out early just
5443	 * for a bit extra safety in case the VBT is bonkers.
5444	 */
5445	if (INTEL_GEN(dev_priv) < 5)
5446		return false;
5447
5448	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5449		return true;
5450
5451	return intel_bios_is_port_edp(dev_priv, port);
5452}
5453
5454static void
5455intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5456{
5457	struct drm_i915_private *dev_priv = to_i915(connector->dev);
5458	enum port port = dp_to_dig_port(intel_dp)->base.port;
5459
5460	if (!IS_G4X(dev_priv) && port != PORT_A)
5461		intel_attach_force_audio_property(connector);
5462
5463	intel_attach_broadcast_rgb_property(connector);
5464
5465	if (intel_dp_is_edp(intel_dp)) {
5466		u32 allowed_scalers;
5467
5468		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
5469		if (!HAS_GMCH_DISPLAY(dev_priv))
5470			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
5471
5472		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
5473
5474		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
5475
5476	}
5477}
5478
5479static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 
 
5480{
5481	intel_dp->panel_power_off_time = ktime_get_boottime();
5482	intel_dp->last_power_on = jiffies;
5483	intel_dp->last_backlight_off = jiffies;
5484}
5485
5486static void
5487intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
5488{
5489	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5490	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5491	struct pps_registers regs;
5492
5493	intel_pps_get_registers(intel_dp, &regs);
5494
5495	/* Workaround: Need to write PP_CONTROL with the unlock key as
5496	 * the very first thing. */
5497	pp_ctl = ironlake_get_pp_control(intel_dp);
5498
5499	pp_on = I915_READ(regs.pp_on);
5500	pp_off = I915_READ(regs.pp_off);
5501	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
5502	    !HAS_PCH_ICP(dev_priv)) {
5503		I915_WRITE(regs.pp_ctrl, pp_ctl);
5504		pp_div = I915_READ(regs.pp_div);
5505	}
5506
5507	/* Pull timing values out of registers */
5508	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5509		     PANEL_POWER_UP_DELAY_SHIFT;
5510
5511	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5512		  PANEL_LIGHT_ON_DELAY_SHIFT;
5513
5514	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5515		  PANEL_LIGHT_OFF_DELAY_SHIFT;
5516
5517	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5518		   PANEL_POWER_DOWN_DELAY_SHIFT;
5519
5520	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
5521	    HAS_PCH_ICP(dev_priv)) {
5522		seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5523				BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
5524	} else {
5525		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5526		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5527	}
5528}
5529
5530static void
5531intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
5532{
5533	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5534		      state_name,
5535		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
5536}
5537
5538static void
5539intel_pps_verify_state(struct intel_dp *intel_dp)
5540{
5541	struct edp_power_seq hw;
5542	struct edp_power_seq *sw = &intel_dp->pps_delays;
5543
5544	intel_pps_readout_hw_state(intel_dp, &hw);
 
5545
5546	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
5547	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
5548		DRM_ERROR("PPS state mismatch\n");
5549		intel_pps_dump_state("sw", sw);
5550		intel_pps_dump_state("hw", &hw);
5551	}
 
5552}
5553
5554static void
5555intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
5556{
5557	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5558	struct edp_power_seq cur, vbt, spec,
5559		*final = &intel_dp->pps_delays;
5560
5561	lockdep_assert_held(&dev_priv->pps_mutex);
5562
5563	/* already initialized? */
5564	if (final->t11_t12 != 0)
5565		return;
5566
5567	intel_pps_readout_hw_state(intel_dp, &cur);
5568
5569	intel_pps_dump_state("cur", &cur);
5570
5571	vbt = dev_priv->vbt.edp.pps;
5572	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
5573	 * of 500ms appears to be too short. Ocassionally the panel
5574	 * just fails to power back on. Increasing the delay to 800ms
5575	 * seems sufficient to avoid this problem.
5576	 */
5577	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
5578		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
5579		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
5580			      vbt.t11_t12);
5581	}
5582	/* T11_T12 delay is special and actually in units of 100ms, but zero
5583	 * based in the hw (so we need to add 100 ms). But the sw vbt
5584	 * table multiplies it with 1000 to make it in units of 100usec,
5585	 * too. */
5586	vbt.t11_t12 += 100 * 10;
5587
5588	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5589	 * our hw here, which are all in 100usec. */
5590	spec.t1_t3 = 210 * 10;
5591	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5592	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5593	spec.t10 = 500 * 10;
5594	/* This one is special and actually in units of 100ms, but zero
5595	 * based in the hw (so we need to add 100 ms). But the sw vbt
5596	 * table multiplies it with 1000 to make it in units of 100usec,
5597	 * too. */
5598	spec.t11_t12 = (510 + 100) * 10;
5599
5600	intel_pps_dump_state("vbt", &vbt);
5601
5602	/* Use the max of the register settings and vbt. If both are
5603	 * unset, fall back to the spec limits. */
5604#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5605				       spec.field : \
5606				       max(cur.field, vbt.field))
5607	assign_final(t1_t3);
5608	assign_final(t8);
5609	assign_final(t9);
5610	assign_final(t10);
5611	assign_final(t11_t12);
5612#undef assign_final
5613
5614#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5615	intel_dp->panel_power_up_delay = get_delay(t1_t3);
5616	intel_dp->backlight_on_delay = get_delay(t8);
5617	intel_dp->backlight_off_delay = get_delay(t9);
5618	intel_dp->panel_power_down_delay = get_delay(t10);
5619	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5620#undef get_delay
5621
5622	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5623		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5624		      intel_dp->panel_power_cycle_delay);
5625
5626	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5627		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5628
5629	/*
5630	 * We override the HW backlight delays to 1 because we do manual waits
5631	 * on them. For T8, even BSpec recommends doing it. For T9, if we
5632	 * don't do this, we'll end up waiting for the backlight off delay
5633	 * twice: once when we do the manual sleep, and once when we disable
5634	 * the panel and wait for the PP_STATUS bit to become zero.
5635	 */
5636	final->t8 = 1;
5637	final->t9 = 1;
5638
5639	/*
5640	 * HW has only a 100msec granularity for t11_t12 so round it up
5641	 * accordingly.
5642	 */
5643	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
5644}
5645
5646static void
5647intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
5648					      bool force_disable_vdd)
5649{
5650	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5651	u32 pp_on, pp_off, pp_div, port_sel = 0;
5652	int div = dev_priv->rawclk_freq / 1000;
5653	struct pps_registers regs;
5654	enum port port = dp_to_dig_port(intel_dp)->base.port;
5655	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5656
5657	lockdep_assert_held(&dev_priv->pps_mutex);
5658
5659	intel_pps_get_registers(intel_dp, &regs);
5660
5661	/*
5662	 * On some VLV machines the BIOS can leave the VDD
5663	 * enabled even on power seqeuencers which aren't
5664	 * hooked up to any port. This would mess up the
5665	 * power domain tracking the first time we pick
5666	 * one of these power sequencers for use since
5667	 * edp_panel_vdd_on() would notice that the VDD was
5668	 * already on and therefore wouldn't grab the power
5669	 * domain reference. Disable VDD first to avoid this.
5670	 * This also avoids spuriously turning the VDD on as
5671	 * soon as the new power seqeuencer gets initialized.
5672	 */
5673	if (force_disable_vdd) {
5674		u32 pp = ironlake_get_pp_control(intel_dp);
5675
5676		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
5677
5678		if (pp & EDP_FORCE_VDD)
5679			DRM_DEBUG_KMS("VDD already on, disabling first\n");
5680
5681		pp &= ~EDP_FORCE_VDD;
5682
5683		I915_WRITE(regs.pp_ctrl, pp);
5684	}
5685
5686	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5687		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
5688	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5689		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5690	/* Compute the divisor for the pp clock, simply match the Bspec
5691	 * formula. */
5692	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
5693	    HAS_PCH_ICP(dev_priv)) {
5694		pp_div = I915_READ(regs.pp_ctrl);
5695		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5696		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5697				<< BXT_POWER_CYCLE_DELAY_SHIFT);
5698	} else {
5699		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5700		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5701				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
5702	}
5703
5704	/* Haswell doesn't have any port selection bits for the panel
5705	 * power sequencer any more. */
5706	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5707		port_sel = PANEL_PORT_SELECT_VLV(port);
5708	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5709		if (port == PORT_A)
5710			port_sel = PANEL_PORT_SELECT_DPA;
5711		else
5712			port_sel = PANEL_PORT_SELECT_DPD;
5713	}
5714
5715	pp_on |= port_sel;
5716
5717	I915_WRITE(regs.pp_on, pp_on);
5718	I915_WRITE(regs.pp_off, pp_off);
5719	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
5720	    HAS_PCH_ICP(dev_priv))
5721		I915_WRITE(regs.pp_ctrl, pp_div);
5722	else
5723		I915_WRITE(regs.pp_div, pp_div);
5724
5725	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5726		      I915_READ(regs.pp_on),
5727		      I915_READ(regs.pp_off),
5728		      (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)  ||
5729		       HAS_PCH_ICP(dev_priv)) ?
5730		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
5731		      I915_READ(regs.pp_div));
5732}
5733
5734static void intel_dp_pps_init(struct intel_dp *intel_dp)
5735{
5736	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5737
5738	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5739		vlv_initial_power_sequencer_setup(intel_dp);
5740	} else {
5741		intel_dp_init_panel_power_sequencer(intel_dp);
5742		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
5743	}
5744}
5745
5746/**
5747 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5748 * @dev_priv: i915 device
5749 * @crtc_state: a pointer to the active intel_crtc_state
5750 * @refresh_rate: RR to be programmed
5751 *
5752 * This function gets called when refresh rate (RR) has to be changed from
5753 * one frequency to another. Switches can be between high and low RR
5754 * supported by the panel or to any other RR based on media playback (in
5755 * this case, RR value needs to be passed from user space).
5756 *
5757 * The caller of this function needs to take a lock on dev_priv->drrs.
5758 */
5759static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5760				    const struct intel_crtc_state *crtc_state,
5761				    int refresh_rate)
5762{
5763	struct intel_encoder *encoder;
5764	struct intel_digital_port *dig_port = NULL;
5765	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5766	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
5767	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5768
5769	if (refresh_rate <= 0) {
5770		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5771		return;
5772	}
5773
5774	if (intel_dp == NULL) {
5775		DRM_DEBUG_KMS("DRRS not supported.\n");
5776		return;
5777	}
5778
5779	dig_port = dp_to_dig_port(intel_dp);
5780	encoder = &dig_port->base;
5781
5782	if (!intel_crtc) {
5783		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5784		return;
5785	}
5786
5787	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5788		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5789		return;
5790	}
5791
5792	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5793			refresh_rate)
5794		index = DRRS_LOW_RR;
5795
5796	if (index == dev_priv->drrs.refresh_rate_type) {
5797		DRM_DEBUG_KMS(
5798			"DRRS requested for previously set RR...ignoring\n");
5799		return;
5800	}
5801
5802	if (!crtc_state->base.active) {
5803		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5804		return;
5805	}
5806
5807	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
5808		switch (index) {
5809		case DRRS_HIGH_RR:
5810			intel_dp_set_m_n(intel_crtc, M1_N1);
5811			break;
5812		case DRRS_LOW_RR:
5813			intel_dp_set_m_n(intel_crtc, M2_N2);
5814			break;
5815		case DRRS_MAX_RR:
5816		default:
5817			DRM_ERROR("Unsupported refreshrate type\n");
5818		}
5819	} else if (INTEL_GEN(dev_priv) > 6) {
5820		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
5821		u32 val;
5822
5823		val = I915_READ(reg);
5824		if (index > DRRS_HIGH_RR) {
5825			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5826				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5827			else
5828				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5829		} else {
5830			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5831				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5832			else
5833				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5834		}
5835		I915_WRITE(reg, val);
5836	}
5837
5838	dev_priv->drrs.refresh_rate_type = index;
5839
5840	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5841}
5842
5843/**
5844 * intel_edp_drrs_enable - init drrs struct if supported
5845 * @intel_dp: DP struct
5846 * @crtc_state: A pointer to the active crtc state.
5847 *
5848 * Initializes frontbuffer_bits and drrs.dp
5849 */
5850void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5851			   const struct intel_crtc_state *crtc_state)
5852{
5853	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5854
5855	if (!crtc_state->has_drrs) {
5856		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5857		return;
5858	}
5859
5860	if (dev_priv->psr.enabled) {
5861		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
5862		return;
5863	}
5864
5865	mutex_lock(&dev_priv->drrs.mutex);
5866	if (WARN_ON(dev_priv->drrs.dp)) {
5867		DRM_ERROR("DRRS already enabled\n");
5868		goto unlock;
5869	}
5870
5871	dev_priv->drrs.busy_frontbuffer_bits = 0;
5872
5873	dev_priv->drrs.dp = intel_dp;
5874
5875unlock:
5876	mutex_unlock(&dev_priv->drrs.mutex);
5877}
5878
5879/**
5880 * intel_edp_drrs_disable - Disable DRRS
5881 * @intel_dp: DP struct
5882 * @old_crtc_state: Pointer to old crtc_state.
5883 *
5884 */
5885void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5886			    const struct intel_crtc_state *old_crtc_state)
5887{
5888	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5889
5890	if (!old_crtc_state->has_drrs)
5891		return;
5892
5893	mutex_lock(&dev_priv->drrs.mutex);
5894	if (!dev_priv->drrs.dp) {
5895		mutex_unlock(&dev_priv->drrs.mutex);
5896		return;
5897	}
5898
5899	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5900		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
5901			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
5902
5903	dev_priv->drrs.dp = NULL;
5904	mutex_unlock(&dev_priv->drrs.mutex);
5905
5906	cancel_delayed_work_sync(&dev_priv->drrs.work);
5907}
5908
5909static void intel_edp_drrs_downclock_work(struct work_struct *work)
5910{
5911	struct drm_i915_private *dev_priv =
5912		container_of(work, typeof(*dev_priv), drrs.work.work);
5913	struct intel_dp *intel_dp;
 
 
 
 
5914
5915	mutex_lock(&dev_priv->drrs.mutex);
5916
5917	intel_dp = dev_priv->drrs.dp;
5918
5919	if (!intel_dp)
5920		goto unlock;
5921
5922	/*
5923	 * The delayed work can race with an invalidate hence we need to
5924	 * recheck.
5925	 */
5926
5927	if (dev_priv->drrs.busy_frontbuffer_bits)
5928		goto unlock;
5929
5930	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
5931		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5932
5933		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5934			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
5935	}
5936
5937unlock:
5938	mutex_unlock(&dev_priv->drrs.mutex);
5939}
5940
5941/**
5942 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5943 * @dev_priv: i915 device
5944 * @frontbuffer_bits: frontbuffer plane tracking bits
5945 *
5946 * This function gets called everytime rendering on the given planes start.
5947 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5948 *
5949 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5950 */
5951void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
5952			       unsigned int frontbuffer_bits)
5953{
5954	struct drm_crtc *crtc;
5955	enum pipe pipe;
5956
5957	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5958		return;
5959
5960	cancel_delayed_work(&dev_priv->drrs.work);
 
5961
5962	mutex_lock(&dev_priv->drrs.mutex);
5963	if (!dev_priv->drrs.dp) {
5964		mutex_unlock(&dev_priv->drrs.mutex);
5965		return;
5966	}
 
5967
5968	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5969	pipe = to_intel_crtc(crtc)->pipe;
 
5970
5971	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5972	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5973
5974	/* invalidate means busy screen hence upclock */
5975	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5976		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5977			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5978
5979	mutex_unlock(&dev_priv->drrs.mutex);
5980}
5981
5982/**
5983 * intel_edp_drrs_flush - Restart Idleness DRRS
5984 * @dev_priv: i915 device
5985 * @frontbuffer_bits: frontbuffer plane tracking bits
5986 *
5987 * This function gets called every time rendering on the given planes has
5988 * completed or flip on a crtc is completed. So DRRS should be upclocked
5989 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5990 * if no other planes are dirty.
5991 *
5992 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5993 */
5994void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
5995			  unsigned int frontbuffer_bits)
5996{
5997	struct drm_crtc *crtc;
5998	enum pipe pipe;
5999
6000	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6001		return;
6002
6003	cancel_delayed_work(&dev_priv->drrs.work);
6004
6005	mutex_lock(&dev_priv->drrs.mutex);
6006	if (!dev_priv->drrs.dp) {
6007		mutex_unlock(&dev_priv->drrs.mutex);
6008		return;
6009	}
6010
6011	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6012	pipe = to_intel_crtc(crtc)->pipe;
 
6013
6014	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6015	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6016
6017	/* flush means busy screen hence upclock */
6018	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6019		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6020				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
 
 
6021
6022	/*
6023	 * flush also means no more activity hence schedule downclock, if all
6024	 * other fbs are quiescent too
6025	 */
6026	if (!dev_priv->drrs.busy_frontbuffer_bits)
6027		schedule_delayed_work(&dev_priv->drrs.work,
6028				msecs_to_jiffies(1000));
6029	mutex_unlock(&dev_priv->drrs.mutex);
6030}
6031
6032/**
6033 * DOC: Display Refresh Rate Switching (DRRS)
6034 *
6035 * Display Refresh Rate Switching (DRRS) is a power conservation feature
6036 * which enables switching between low and high refresh rates,
6037 * dynamically, based on the usage scenario. This feature is applicable
6038 * for internal panels.
6039 *
6040 * Indication that the panel supports DRRS is given by the panel EDID, which
6041 * would list multiple refresh rates for one resolution.
6042 *
6043 * DRRS is of 2 types - static and seamless.
6044 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6045 * (may appear as a blink on screen) and is used in dock-undock scenario.
6046 * Seamless DRRS involves changing RR without any visual effect to the user
6047 * and can be used during normal system usage. This is done by programming
6048 * certain registers.
6049 *
6050 * Support for static/seamless DRRS may be indicated in the VBT based on
6051 * inputs from the panel spec.
6052 *
6053 * DRRS saves power by switching to low RR based on usage scenarios.
6054 *
6055 * The implementation is based on frontbuffer tracking implementation.  When
6056 * there is a disturbance on the screen triggered by user activity or a periodic
6057 * system activity, DRRS is disabled (RR is changed to high RR).  When there is
6058 * no movement on screen, after a timeout of 1 second, a switch to low RR is
6059 * made.
6060 *
6061 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6062 * and intel_edp_drrs_flush() are called.
6063 *
6064 * DRRS can be further extended to support other internal panels and also
6065 * the scenario of video playback wherein RR is set based on the rate
6066 * requested by userspace.
6067 */
6068
6069/**
6070 * intel_dp_drrs_init - Init basic DRRS work and mutex.
6071 * @connector: eDP connector
6072 * @fixed_mode: preferred mode of panel
6073 *
6074 * This function is  called only once at driver load to initialize basic
6075 * DRRS stuff.
6076 *
6077 * Returns:
6078 * Downclock mode if panel supports it, else return NULL.
6079 * DRRS support is determined by the presence of downclock mode (apart
6080 * from VBT setting).
6081 */
6082static struct drm_display_mode *
6083intel_dp_drrs_init(struct intel_connector *connector,
6084		   struct drm_display_mode *fixed_mode)
6085{
6086	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
6087	struct drm_display_mode *downclock_mode = NULL;
6088
6089	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6090	mutex_init(&dev_priv->drrs.mutex);
6091
6092	if (INTEL_GEN(dev_priv) <= 6) {
6093		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
6094		return NULL;
6095	}
6096
6097	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6098		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6099		return NULL;
6100	}
6101
6102	downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
6103						    &connector->base);
6104
6105	if (!downclock_mode) {
6106		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
6107		return NULL;
6108	}
6109
6110	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6111
6112	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6113	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
6114	return downclock_mode;
6115}
6116
/*
 * One-time eDP-specific connector setup: bring up the panel power
 * sequencer, cache the DPCD and EDID, pick the fixed/downclock modes
 * and initialize the panel + backlight.  Returns false if the eDP
 * device looks like a ghost (no DPCD) or clashes with LVDS.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *alt_fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	/* Nothing eDP-specific to do for plain DP connectors. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(&dev_priv->drm)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	/* PPS state is shared; all sequencer setup runs under pps_lock. */
	pps_lock(intel_dp);

	intel_dp_init_panel_power_timestamps(intel_dp);
	intel_dp_pps_init(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
		} else {
			/* EDID present but unusable: record the failure. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cache the (possibly error-encoded) EDID for later detection. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available, save an alt mode also */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
		} else if (!alt_fixed_mode) {
			alt_fixed_mode = drm_mode_duplicate(dev, scan);
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode) {
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
			 downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);

	return false;
}
6242
6243static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6244{
6245	struct intel_connector *intel_connector;
6246	struct drm_connector *connector;
6247
6248	intel_connector = container_of(work, typeof(*intel_connector),
6249				       modeset_retry_work);
6250	connector = &intel_connector->base;
6251	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
6252		      connector->name);
6253
6254	/* Grab the locks before changing connector property*/
6255	mutex_lock(&connector->dev->mode_config.mutex);
6256	/* Set connector link status to BAD and send a Uevent to notify
6257	 * userspace to do a modeset.
6258	 */
6259	drm_mode_connector_set_link_status_property(connector,
6260						    DRM_MODE_LINK_STATUS_BAD);
6261	mutex_unlock(&connector->dev->mode_config.mutex);
6262	/* Send Hotplug uevent so userspace can reprobe */
6263	drm_kms_helper_hotplug_event(connector->dev);
6264}
6265
/*
 * Finish constructing a DP/eDP connector on top of an already-allocated
 * digital port: set up vfuncs and source rates, register the DRM
 * connector, init AUX/MST, and run the eDP-specific probe.  Returns
 * false (after cleaning up) if the port cannot host a connector.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	/* VLV/CHV don't get interlaced output; see check below. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP probe may decide the panel is a ghost; unwind if so. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is only wired up for external DP here, not eDP. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
6385
/*
 * Allocate and register a DP encoder + connector for @port.  Wires up
 * the platform-specific enable/disable vfuncs, then hands off to
 * intel_dp_init_connector().  Returns false on any allocation or
 * registration failure, unwinding whatever was set up.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific modeset sequence hooks. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = ilk_disable_dp;
		intel_encoder->post_disable = ilk_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	/* On CHV, port D is tied to pipe C; other ports use pipes A/B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	/* Port A (eDP) has no HDMI/infoframe capability. */
	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
6475
6476void intel_dp_mst_suspend(struct drm_device *dev)
6477{
6478	struct drm_i915_private *dev_priv = to_i915(dev);
6479	int i;
6480
6481	/* disable MST */
6482	for (i = 0; i < I915_MAX_PORTS; i++) {
6483		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6484
6485		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6486			continue;
6487
6488		if (intel_dig_port->dp.is_mst)
6489			drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6490	}
6491}
6492
6493void intel_dp_mst_resume(struct drm_device *dev)
6494{
6495	struct drm_i915_private *dev_priv = to_i915(dev);
6496	int i;
6497
6498	for (i = 0; i < I915_MAX_PORTS; i++) {
6499		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6500		int ret;
6501
6502		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6503			continue;
6504
6505		ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6506		if (ret)
6507			intel_dp_check_mst_status(&intel_dig_port->dp);
6508	}
6509}