/* drivers/gpu/drm/i915/display/intel_dp.c — Linux v5.14.15 */
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/export.h>
  29#include <linux/i2c.h>
  30#include <linux/notifier.h>
 
  31#include <linux/slab.h>
  32#include <linux/types.h>
  33
  34#include <asm/byteorder.h>
  35
  36#include <drm/drm_atomic_helper.h>
  37#include <drm/drm_crtc.h>
  38#include <drm/drm_dp_helper.h>
  39#include <drm/drm_edid.h>
 
  40#include <drm/drm_probe_helper.h>
 
  41
  42#include "g4x_dp.h"
  43#include "i915_debugfs.h"
  44#include "i915_drv.h"
 
  45#include "intel_atomic.h"
  46#include "intel_audio.h"
  47#include "intel_connector.h"
  48#include "intel_ddi.h"
  49#include "intel_de.h"
  50#include "intel_display_types.h"
  51#include "intel_dp.h"
  52#include "intel_dp_aux.h"
  53#include "intel_dp_hdcp.h"
  54#include "intel_dp_link_training.h"
  55#include "intel_dp_mst.h"
  56#include "intel_dpio_phy.h"
  57#include "intel_dpll.h"
  58#include "intel_fifo_underrun.h"
  59#include "intel_hdcp.h"
  60#include "intel_hdmi.h"
  61#include "intel_hotplug.h"
  62#include "intel_lspcon.h"
  63#include "intel_lvds.h"
  64#include "intel_panel.h"
  65#include "intel_pps.h"
  66#include "intel_psr.h"
  67#include "intel_sideband.h"
  68#include "intel_tc.h"
  69#include "intel_vdsc.h"
  70#include "intel_vrr.h"
  71
  72#define DP_DPRX_ESI_LEN 14
  73
 
 
 
 
 
  74/* DP DSC throughput values used for slice count calculations KPixels/s */
  75#define DP_DSC_PEAK_PIXEL_RATE			2720000
  76#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
  77#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000
  78
  79/* DP DSC FEC Overhead factor = 1/(0.972261) */
  80#define DP_DSC_FEC_OVERHEAD_FACTOR		972261
  81
  82/* Compliance test status bits  */
  83#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
  84#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  85#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  86#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  88
  89/* Constants for DP DSC configurations */
  90static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
  91
  92/* With Single pipe configuration, HW is capable of supporting maximum
  93 * of 4 slices per line.
  94 */
  95static const u8 valid_dsc_slicecount[] = {1, 2, 4};
  96
  97/**
  98 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  99 * @intel_dp: DP struct
 100 *
 101 * If a CPU or PCH DP output is attached to an eDP panel, this function
 102 * will return true, and false otherwise.
 103 */
 104bool intel_dp_is_edp(struct intel_dp *intel_dp)
 105{
 106	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 107
 108	return dig_port->base.type == INTEL_OUTPUT_EDP;
 109}
 110
 
 
 
 
 
 
 
 
 
 
 
 
 
 111static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 112static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
 113
 114/* update sink rates from dpcd */
 115static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
 116{
 117	static const int dp_rates[] = {
 118		162000, 270000, 540000, 810000
 119	};
 120	int i, max_rate;
 121	int max_lttpr_rate;
 122
 123	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
 124		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
 125		static const int quirk_rates[] = { 162000, 270000, 324000 };
 126
 127		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
 128		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
 129
 130		return;
 131	}
 132
 133	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
 134	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
 135	if (max_lttpr_rate)
 136		max_rate = min(max_rate, max_lttpr_rate);
 137
 138	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
 139		if (dp_rates[i] > max_rate)
 140			break;
 141		intel_dp->sink_rates[i] = dp_rates[i];
 142	}
 143
 144	intel_dp->num_sink_rates = i;
 145}
 146
 147/* Get length of rates array potentially limited by max_rate. */
 148static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
 149{
 150	int i;
 151
 152	/* Limit results by potentially reduced max rate */
 153	for (i = 0; i < len; i++) {
 154		if (rates[len - i - 1] <= max_rate)
 155			return len - i;
 156	}
 157
 158	return 0;
 159}
 160
 161/* Get length of common rates array potentially limited by max_rate. */
 162static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
 163					  int max_rate)
 164{
 165	return intel_dp_rate_limit_len(intel_dp->common_rates,
 166				       intel_dp->num_common_rates, max_rate);
 167}
 168
 169/* Theoretical max between source and sink */
 170static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
 171{
 172	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
 173}
 174
 175/* Theoretical max between source and sink */
 176static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 177{
 178	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 179	int source_max = dig_port->max_lanes;
 180	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 181	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
 182	int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
 183
 184	if (lttpr_max)
 185		sink_max = min(sink_max, lttpr_max);
 186
 187	return min3(source_max, sink_max, fia_max);
 188}
 189
 190int intel_dp_max_lane_count(struct intel_dp *intel_dp)
 191{
 192	return intel_dp->max_link_lane_count;
 193}
 194
 195int
 196intel_dp_link_required(int pixel_clock, int bpp)
 197{
 198	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
 199	return DIV_ROUND_UP(pixel_clock * bpp, 8);
 200}
 201
 202int
 203intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 204{
 205	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
 206	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
 207	 * is transmitted every LS_Clk per lane, there is no need to account for
 208	 * the channel encoding that is done in the PHY layer here.
 209	 */
 210
 211	return max_link_clock * max_lanes;
 212}
 213
 214bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
 
 215{
 216	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 217	struct intel_encoder *encoder = &intel_dig_port->base;
 218	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
 
 
 
 
 
 
 219
 220	return DISPLAY_VER(dev_priv) >= 12 ||
 221		(DISPLAY_VER(dev_priv) == 11 &&
 222		 encoder->port != PORT_A);
 
 
 
 
 223}
 224
 225static int cnl_max_source_rate(struct intel_dp *intel_dp)
 226{
 227	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 228	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 229	enum port port = dig_port->base.port;
 230
 231	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
 232
 233	/* Low voltage SKUs are limited to max of 5.4G */
 234	if (voltage == VOLTAGE_INFO_0_85V)
 235		return 540000;
 236
 237	/* For this SKU 8.1G is supported in all ports */
 238	if (IS_CNL_WITH_PORT_F(dev_priv))
 239		return 810000;
 240
 241	/* For other SKUs, max rate on ports A and D is 5.4G */
 242	if (port == PORT_A || port == PORT_D)
 243		return 540000;
 244
 245	return 810000;
 246}
 247
 248static int icl_max_source_rate(struct intel_dp *intel_dp)
 249{
 250	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 251	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 252	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
 253
 254	if (intel_phy_is_combo(dev_priv, phy) &&
 
 255	    !intel_dp_is_edp(intel_dp))
 256		return 540000;
 257
 258	return 810000;
 259}
 260
 261static int ehl_max_source_rate(struct intel_dp *intel_dp)
 262{
 263	if (intel_dp_is_edp(intel_dp))
 264		return 540000;
 265
 266	return 810000;
 267}
 268
 269static void
 270intel_dp_set_source_rates(struct intel_dp *intel_dp)
 271{
 272	/* The values must be in increasing order */
 273	static const int cnl_rates[] = {
 274		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
 275	};
 276	static const int bxt_rates[] = {
 277		162000, 216000, 243000, 270000, 324000, 432000, 540000
 278	};
 279	static const int skl_rates[] = {
 280		162000, 216000, 270000, 324000, 432000, 540000
 281	};
 282	static const int hsw_rates[] = {
 283		162000, 270000, 540000
 284	};
 285	static const int g4x_rates[] = {
 286		162000, 270000
 287	};
 288	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 289	struct intel_encoder *encoder = &dig_port->base;
 290	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 
 
 291	const int *source_rates;
 292	int size, max_rate = 0, vbt_max_rate;
 293
 294	/* This should only be done once */
 295	drm_WARN_ON(&dev_priv->drm,
 296		    intel_dp->source_rates || intel_dp->num_source_rates);
 297
 298	if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
 299		source_rates = cnl_rates;
 300		size = ARRAY_SIZE(cnl_rates);
 301		if (DISPLAY_VER(dev_priv) == 10)
 302			max_rate = cnl_max_source_rate(intel_dp);
 303		else if (IS_JSL_EHL(dev_priv))
 304			max_rate = ehl_max_source_rate(intel_dp);
 305		else
 306			max_rate = icl_max_source_rate(intel_dp);
 307	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
 308		source_rates = bxt_rates;
 309		size = ARRAY_SIZE(bxt_rates);
 310	} else if (DISPLAY_VER(dev_priv) == 9) {
 311		source_rates = skl_rates;
 312		size = ARRAY_SIZE(skl_rates);
 313	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
 314		   IS_BROADWELL(dev_priv)) {
 315		source_rates = hsw_rates;
 316		size = ARRAY_SIZE(hsw_rates);
 317	} else {
 318		source_rates = g4x_rates;
 319		size = ARRAY_SIZE(g4x_rates);
 320	}
 321
 322	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
 323	if (max_rate && vbt_max_rate)
 324		max_rate = min(max_rate, vbt_max_rate);
 325	else if (vbt_max_rate)
 326		max_rate = vbt_max_rate;
 327
 328	if (max_rate)
 329		size = intel_dp_rate_limit_len(source_rates, size, max_rate);
 330
 331	intel_dp->source_rates = source_rates;
 332	intel_dp->num_source_rates = size;
 333}
 334
 335static int intersect_rates(const int *source_rates, int source_len,
 336			   const int *sink_rates, int sink_len,
 337			   int *common_rates)
 338{
 339	int i = 0, j = 0, k = 0;
 340
 341	while (i < source_len && j < sink_len) {
 342		if (source_rates[i] == sink_rates[j]) {
 343			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
 344				return k;
 345			common_rates[k] = source_rates[i];
 346			++k;
 347			++i;
 348			++j;
 349		} else if (source_rates[i] < sink_rates[j]) {
 350			++i;
 351		} else {
 352			++j;
 353		}
 354	}
 355	return k;
 356}
 357
 358/* return index of rate in rates array, or -1 if not found */
 359static int intel_dp_rate_index(const int *rates, int len, int rate)
 360{
 361	int i;
 362
 363	for (i = 0; i < len; i++)
 364		if (rate == rates[i])
 365			return i;
 366
 367	return -1;
 368}
 369
 370static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
 371{
 372	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 373
 374	drm_WARN_ON(&i915->drm,
 375		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
 376
 377	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
 378						     intel_dp->num_source_rates,
 379						     intel_dp->sink_rates,
 380						     intel_dp->num_sink_rates,
 381						     intel_dp->common_rates);
 382
 383	/* Paranoia, there should always be something in common. */
 384	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
 385		intel_dp->common_rates[0] = 162000;
 386		intel_dp->num_common_rates = 1;
 387	}
 388}
 389
 390static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
 391				       u8 lane_count)
 392{
 393	/*
 394	 * FIXME: we need to synchronize the current link parameters with
 395	 * hardware readout. Currently fast link training doesn't work on
 396	 * boot-up.
 397	 */
 398	if (link_rate == 0 ||
 399	    link_rate > intel_dp->max_link_rate)
 400		return false;
 401
 402	if (lane_count == 0 ||
 403	    lane_count > intel_dp_max_lane_count(intel_dp))
 404		return false;
 405
 406	return true;
 407}
 408
 409static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
 410						     int link_rate,
 411						     u8 lane_count)
 412{
 413	const struct drm_display_mode *fixed_mode =
 414		intel_dp->attached_connector->panel.fixed_mode;
 415	int mode_rate, max_rate;
 416
 417	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
 418	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
 419	if (mode_rate > max_rate)
 420		return false;
 421
 422	return true;
 423}
 424
 425int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 426					    int link_rate, u8 lane_count)
 427{
 428	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 429	int index;
 430
 431	/*
 432	 * TODO: Enable fallback on MST links once MST link compute can handle
 433	 * the fallback params.
 434	 */
 435	if (intel_dp->is_mst) {
 436		drm_err(&i915->drm, "Link Training Unsuccessful\n");
 437		return -1;
 438	}
 439
 440	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
 441		drm_dbg_kms(&i915->drm,
 442			    "Retrying Link training for eDP with max parameters\n");
 443		intel_dp->use_max_params = true;
 444		return 0;
 445	}
 446
 447	index = intel_dp_rate_index(intel_dp->common_rates,
 448				    intel_dp->num_common_rates,
 449				    link_rate);
 450	if (index > 0) {
 451		if (intel_dp_is_edp(intel_dp) &&
 452		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
 453							      intel_dp->common_rates[index - 1],
 454							      lane_count)) {
 455			drm_dbg_kms(&i915->drm,
 456				    "Retrying Link training for eDP with same parameters\n");
 457			return 0;
 458		}
 459		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
 460		intel_dp->max_link_lane_count = lane_count;
 461	} else if (lane_count > 1) {
 462		if (intel_dp_is_edp(intel_dp) &&
 463		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
 464							      intel_dp_max_common_rate(intel_dp),
 465							      lane_count >> 1)) {
 466			drm_dbg_kms(&i915->drm,
 467				    "Retrying Link training for eDP with same parameters\n");
 468			return 0;
 469		}
 470		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
 471		intel_dp->max_link_lane_count = lane_count >> 1;
 472	} else {
 473		drm_err(&i915->drm, "Link Training Unsuccessful\n");
 474		return -1;
 475	}
 476
 477	return 0;
 478}
 479
 480u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
 481{
 482	return div_u64(mul_u32_u32(mode_clock, 1000000U),
 483		       DP_DSC_FEC_OVERHEAD_FACTOR);
 484}
 485
 486static int
 487small_joiner_ram_size_bits(struct drm_i915_private *i915)
 488{
 489	if (DISPLAY_VER(i915) >= 11)
 490		return 7680 * 8;
 491	else
 492		return 6144 * 8;
 493}
 494
 495static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
 496				       u32 link_clock, u32 lane_count,
 497				       u32 mode_clock, u32 mode_hdisplay,
 498				       bool bigjoiner,
 499				       u32 pipe_bpp)
 500{
 501	u32 bits_per_pixel, max_bpp_small_joiner_ram;
 502	int i;
 503
 504	/*
 505	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
 506	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
 507	 * for SST -> TimeSlotsPerMTP is 1,
 508	 * for MST -> TimeSlotsPerMTP has to be calculated
 509	 */
 510	bits_per_pixel = (link_clock * lane_count * 8) /
 511			 intel_dp_mode_to_fec_clock(mode_clock);
 512	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
 513
 514	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
 515	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
 516		mode_hdisplay;
 517
 518	if (bigjoiner)
 519		max_bpp_small_joiner_ram *= 2;
 520
 521	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
 522		    max_bpp_small_joiner_ram);
 523
 524	/*
 525	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
 526	 * check, output bpp from small joiner RAM check)
 527	 */
 528	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
 529
 530	if (bigjoiner) {
 531		u32 max_bpp_bigjoiner =
 532			i915->max_cdclk_freq * 48 /
 533			intel_dp_mode_to_fec_clock(mode_clock);
 534
 535		DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
 536		bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
 537	}
 538
 539	/* Error out if the max bpp is less than smallest allowed valid bpp */
 540	if (bits_per_pixel < valid_dsc_bpp[0]) {
 541		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
 542			    bits_per_pixel, valid_dsc_bpp[0]);
 543		return 0;
 544	}
 545
 546	/* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
 547	if (DISPLAY_VER(i915) >= 13) {
 548		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
 549	} else {
 550		/* Find the nearest match in the array of known BPPs from VESA */
 551		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
 552			if (bits_per_pixel < valid_dsc_bpp[i + 1])
 553				break;
 554		}
 555		bits_per_pixel = valid_dsc_bpp[i];
 556	}
 
 557
 558	/*
 559	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
 560	 * fractional part is 0
 561	 */
 562	return bits_per_pixel << 4;
 563}
 564
 565static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
 566				       int mode_clock, int mode_hdisplay,
 567				       bool bigjoiner)
 568{
 569	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 570	u8 min_slice_count, i;
 571	int max_slice_width;
 572
 573	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
 574		min_slice_count = DIV_ROUND_UP(mode_clock,
 575					       DP_DSC_MAX_ENC_THROUGHPUT_0);
 576	else
 577		min_slice_count = DIV_ROUND_UP(mode_clock,
 578					       DP_DSC_MAX_ENC_THROUGHPUT_1);
 579
 580	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
 581	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
 582		drm_dbg_kms(&i915->drm,
 583			    "Unsupported slice width %d by DP DSC Sink device\n",
 584			    max_slice_width);
 585		return 0;
 586	}
 587	/* Also take into account max slice width */
 588	min_slice_count = max_t(u8, min_slice_count,
 589				DIV_ROUND_UP(mode_hdisplay,
 590					     max_slice_width));
 591
 592	/* Find the closest match to the valid slice count values */
 593	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
 594		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;
 595
 596		if (test_slice_count >
 597		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
 598			break;
 
 
 
 599
 600		/* big joiner needs small joiner to be enabled */
 601		if (bigjoiner && test_slice_count < 4)
 602			continue;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 603
 604		if (min_slice_count <= test_slice_count)
 605			return test_slice_count;
 
 
 606	}
 607
 608	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
 609		    min_slice_count);
 610	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 611}
 612
 613static enum intel_output_format
 614intel_dp_output_format(struct drm_connector *connector,
 615		       const struct drm_display_mode *mode)
 616{
 617	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
 618	const struct drm_display_info *info = &connector->display_info;
 619
 620	if (!connector->ycbcr_420_allowed ||
 621	    !drm_mode_is_420_only(info, mode))
 622		return INTEL_OUTPUT_FORMAT_RGB;
 623
 624	if (intel_dp->dfp.rgb_to_ycbcr &&
 625	    intel_dp->dfp.ycbcr_444_to_420)
 626		return INTEL_OUTPUT_FORMAT_RGB;
 627
 628	if (intel_dp->dfp.ycbcr_444_to_420)
 629		return INTEL_OUTPUT_FORMAT_YCBCR444;
 630	else
 631		return INTEL_OUTPUT_FORMAT_YCBCR420;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 632}
 633
 634int intel_dp_min_bpp(enum intel_output_format output_format)
 
 635{
 636	if (output_format == INTEL_OUTPUT_FORMAT_RGB)
 637		return 6 * 3;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 638	else
 639		return 8 * 3;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 640}
 641
 642static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
 643{
 
 
 
 644	/*
 645	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
 646	 * format of the number of bytes per pixel will be half the number
 647	 * of bytes of RGB pixel.
 648	 */
 649	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
 650		bpp /= 2;
 651
 652	return bpp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 653}
 654
 655static int
 656intel_dp_mode_min_output_bpp(struct drm_connector *connector,
 657			     const struct drm_display_mode *mode)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 658{
 659	enum intel_output_format output_format =
 660		intel_dp_output_format(connector, mode);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 661
 662	return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
 
 
 
 
 
 
 
 
 
 
 
 663}
 664
 665static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
 666				  int hdisplay)
 667{
 
 
 
 
 
 
 668	/*
 669	 * Older platforms don't like hdisplay==4096 with DP.
 670	 *
 671	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
 672	 * and frame counter increment), but we don't get vblank interrupts,
 673	 * and the pipe underruns immediately. The link also doesn't seem
 674	 * to get trained properly.
 675	 *
 676	 * On CHV the vblank interrupts don't seem to disappear but
 677	 * otherwise the symptoms are similar.
 678	 *
 679	 * TODO: confirm the behaviour on HSW+
 680	 */
 681	return hdisplay == 4096 && !HAS_DDI(dev_priv);
 
 
 
 
 
 
 
 
 
 
 
 
 
 682}
 683
 684static enum drm_mode_status
 685intel_dp_mode_valid_downstream(struct intel_connector *connector,
 686			       const struct drm_display_mode *mode,
 687			       int target_clock)
 
 
 
 
 
 
 688{
 689	struct intel_dp *intel_dp = intel_attached_dp(connector);
 690	const struct drm_display_info *info = &connector->base.display_info;
 691	int tmds_clock;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 692
 693	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
 694	if (intel_dp->dfp.pcon_max_frl_bw) {
 695		int target_bw;
 696		int max_frl_bw;
 697		int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
 698
 699		target_bw = bpp * target_clock;
 
 
 
 700
 701		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
 702
 703		/* converting bw from Gbps to Kbps*/
 704		max_frl_bw = max_frl_bw * 1000000;
 
 
 
 
 
 
 
 
 
 
 705
 706		if (target_bw > max_frl_bw)
 707			return MODE_CLOCK_HIGH;
 708
 709		return MODE_OK;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 710	}
 711
 712	if (intel_dp->dfp.max_dotclock &&
 713	    target_clock > intel_dp->dfp.max_dotclock)
 714		return MODE_CLOCK_HIGH;
 715
 716	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
 717	tmds_clock = target_clock;
 718	if (drm_mode_is_420_only(info, mode))
 719		tmds_clock /= 2;
 720
 721	if (intel_dp->dfp.min_tmds_clock &&
 722	    tmds_clock < intel_dp->dfp.min_tmds_clock)
 723		return MODE_CLOCK_LOW;
 724	if (intel_dp->dfp.max_tmds_clock &&
 725	    tmds_clock > intel_dp->dfp.max_tmds_clock)
 726		return MODE_CLOCK_HIGH;
 727
 728	return MODE_OK;
 
 
 
 
 729}
 730
 731static enum drm_mode_status
 732intel_dp_mode_valid(struct drm_connector *connector,
 733		    struct drm_display_mode *mode)
 734{
 735	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
 736	struct intel_connector *intel_connector = to_intel_connector(connector);
 737	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 738	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 739	int target_clock = mode->clock;
 740	int max_rate, mode_rate, max_lanes, max_link_clock;
 741	int max_dotclk = dev_priv->max_dotclk_freq;
 742	u16 dsc_max_output_bpp = 0;
 743	u8 dsc_slice_count = 0;
 744	enum drm_mode_status status;
 745	bool dsc = false, bigjoiner = false;
 746
 747	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 748		return MODE_NO_DBLESCAN;
 749
 750	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
 751		return MODE_H_ILLEGAL;
 
 752
 753	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
 754		if (mode->hdisplay != fixed_mode->hdisplay)
 755			return MODE_PANEL;
 756
 757		if (mode->vdisplay != fixed_mode->vdisplay)
 758			return MODE_PANEL;
 
 
 759
 760		target_clock = fixed_mode->clock;
 
 
 
 
 
 
 
 761	}
 
 762
 763	if (mode->clock < 10000)
 764		return MODE_CLOCK_LOW;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 765
 766	if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
 767	    intel_dp_can_bigjoiner(intel_dp)) {
 768		bigjoiner = true;
 769		max_dotclk *= 2;
 
 
 
 
 
 
 
 
 770	}
 771	if (target_clock > max_dotclk)
 772		return MODE_CLOCK_HIGH;
 773
 774	max_link_clock = intel_dp_max_link_rate(intel_dp);
 775	max_lanes = intel_dp_max_lane_count(intel_dp);
 776
 777	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 778	mode_rate = intel_dp_link_required(target_clock,
 779					   intel_dp_mode_min_output_bpp(connector, mode));
 
 
 
 
 
 
 780
 781	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
 782		return MODE_H_ILLEGAL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 783
 784	/*
 785	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
 786	 * integer value since we support only integer values of bpp.
 
 
 787	 */
 788	if (DISPLAY_VER(dev_priv) >= 10 &&
 789	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
 790		/*
 791		 * TBD pass the connector BPC,
 792		 * for now U8_MAX so that max BPC on that platform would be picked
 793		 */
 794		int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
 795
 796		if (intel_dp_is_edp(intel_dp)) {
 797			dsc_max_output_bpp =
 798				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
 799			dsc_slice_count =
 800				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
 801								true);
 802		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
 803			dsc_max_output_bpp =
 804				intel_dp_dsc_get_output_bpp(dev_priv,
 805							    max_link_clock,
 806							    max_lanes,
 807							    target_clock,
 808							    mode->hdisplay,
 809							    bigjoiner,
 810							    pipe_bpp) >> 4;
 811			dsc_slice_count =
 812				intel_dp_dsc_get_slice_count(intel_dp,
 813							     target_clock,
 814							     mode->hdisplay,
 815							     bigjoiner);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 816		}
 
 
 
 
 
 
 
 817
 818		dsc = dsc_max_output_bpp && dsc_slice_count;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 819	}
 820
 
 
 
 
 821	/*
 822	 * Big joiner configuration needs DSC for TGL which is not true for
 823	 * XE_LPD where uncompressed joiner is supported.
 824	 */
 825	if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
 826		return MODE_CLOCK_HIGH;
 
 
 
 
 
 827
 828	if (mode_rate > max_rate && !dsc)
 829		return MODE_CLOCK_HIGH;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 830
 831	status = intel_dp_mode_valid_downstream(intel_connector,
 832						mode, target_clock);
 833	if (status != MODE_OK)
 834		return status;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 835
 836	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
 
 
 
 837}
 838
 839bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
 840{
 841	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
 842
 843	return max_rate >= 540000;
 844}
 845
 846bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
 847{
 848	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
 849
 850	return max_rate >= 810000;
 851}
 852
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 853static void snprintf_int_array(char *str, size_t len,
 854			       const int *array, int nelem)
 855{
 856	int i;
 857
 858	str[0] = '\0';
 859
 860	for (i = 0; i < nelem; i++) {
 861		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
 862		if (r >= len)
 863			return;
 864		str += r;
 865		len -= r;
 866	}
 867}
 868
 869static void intel_dp_print_rates(struct intel_dp *intel_dp)
 870{
 871	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 872	char str[128]; /* FIXME: too big for stack? */
 873
 874	if (!drm_debug_enabled(DRM_UT_KMS))
 875		return;
 876
 877	snprintf_int_array(str, sizeof(str),
 878			   intel_dp->source_rates, intel_dp->num_source_rates);
 879	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
 880
 881	snprintf_int_array(str, sizeof(str),
 882			   intel_dp->sink_rates, intel_dp->num_sink_rates);
 883	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
 884
 885	snprintf_int_array(str, sizeof(str),
 886			   intel_dp->common_rates, intel_dp->num_common_rates);
 887	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
 888}
 889
 890int
 891intel_dp_max_link_rate(struct intel_dp *intel_dp)
 892{
 893	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 894	int len;
 895
 896	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
 897	if (drm_WARN_ON(&i915->drm, len <= 0))
 898		return 162000;
 899
 900	return intel_dp->common_rates[len - 1];
 901}
 902
 903int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
 904{
 905	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 906	int i = intel_dp_rate_index(intel_dp->sink_rates,
 907				    intel_dp->num_sink_rates, rate);
 908
 909	if (drm_WARN_ON(&i915->drm, i < 0))
 910		i = 0;
 911
 912	return i;
 913}
 914
 915void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
 916			   u8 *link_bw, u8 *rate_select)
 917{
 918	/* eDP 1.4 rate select method. */
 919	if (intel_dp->use_rate_select) {
 920		*link_bw = 0;
 921		*rate_select =
 922			intel_dp_rate_select(intel_dp, port_clock);
 923	} else {
 924		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
 925		*rate_select = 0;
 926	}
 927}
 928
 929static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
 930					 const struct intel_crtc_state *pipe_config)
 931{
 932	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 933
 934	/* On TGL, FEC is supported on all Pipes */
 935	if (DISPLAY_VER(dev_priv) >= 12)
 936		return true;
 937
 938	if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
 939		return true;
 940
 941	return false;
 942}
 943
 944static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
 945				  const struct intel_crtc_state *pipe_config)
 946{
 947	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
 948		drm_dp_sink_supports_fec(intel_dp->fec_capable);
 949}
 950
 951static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
 952				  const struct intel_crtc_state *crtc_state)
 953{
 954	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
 955		return false;
 956
 957	return intel_dsc_source_support(crtc_state) &&
 958		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
 959}
 960
 961static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
 962				   const struct intel_crtc_state *crtc_state)
 963{
 964	return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
 965		(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
 966		 intel_dp->dfp.ycbcr_444_to_420);
 967}
 968
 969static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
 970				    const struct intel_crtc_state *crtc_state, int bpc)
 971{
 972	int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
 973
 974	if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
 975		clock /= 2;
 976
 977	return clock;
 
 978}
 979
 980static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
 981					   const struct intel_crtc_state *crtc_state, int bpc)
 982{
 983	int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
 984
 985	if (intel_dp->dfp.min_tmds_clock &&
 986	    tmds_clock < intel_dp->dfp.min_tmds_clock)
 987		return false;
 988
 989	if (intel_dp->dfp.max_tmds_clock &&
 990	    tmds_clock > intel_dp->dfp.max_tmds_clock)
 991		return false;
 992
 993	return true;
 994}
 995
 996static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
 997					      const struct intel_crtc_state *crtc_state,
 998					      int bpc)
 999{
1000
1001	return intel_hdmi_deep_color_possible(crtc_state, bpc,
1002					      intel_dp->has_hdmi_sink,
1003					      intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
1004		intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
1005}
1006
1007static int intel_dp_max_bpp(struct intel_dp *intel_dp,
1008			    const struct intel_crtc_state *crtc_state)
1009{
1010	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1011	struct intel_connector *intel_connector = intel_dp->attached_connector;
1012	int bpp, bpc;
1013
1014	bpc = crtc_state->pipe_bpp / 3;
1015
1016	if (intel_dp->dfp.max_bpc)
1017		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
1018
1019	if (intel_dp->dfp.min_tmds_clock) {
1020		for (; bpc >= 10; bpc -= 2) {
1021			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
1022				break;
1023		}
1024	}
1025
1026	bpp = bpc * 3;
1027	if (intel_dp_is_edp(intel_dp)) {
1028		/* Get bpp from vbt only for panels that dont have bpp in edid */
1029		if (intel_connector->base.display_info.bpc == 0 &&
1030		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1031			drm_dbg_kms(&dev_priv->drm,
1032				    "clamping bpp for eDP panel to BIOS-provided %i\n",
1033				    dev_priv->vbt.edp.bpp);
1034			bpp = dev_priv->vbt.edp.bpp;
1035		}
1036	}
1037
1038	return bpp;
1039}
1040
1041/* Adjust link config limits based on compliance test requests. */
1042void
1043intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1044				  struct intel_crtc_state *pipe_config,
1045				  struct link_config_limits *limits)
1046{
1047	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1048
1049	/* For DP Compliance we override the computed bpp for the pipe */
1050	if (intel_dp->compliance.test_data.bpc != 0) {
1051		int bpp = 3 * intel_dp->compliance.test_data.bpc;
1052
1053		limits->min_bpp = limits->max_bpp = bpp;
1054		pipe_config->dither_force_disable = bpp == 6 * 3;
1055
1056		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
1057	}
1058
1059	/* Use values requested by Compliance Test Request */
1060	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1061		int index;
1062
1063		/* Validate the compliance test data since max values
1064		 * might have changed due to link train fallback.
1065		 */
1066		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1067					       intel_dp->compliance.test_lane_count)) {
1068			index = intel_dp_rate_index(intel_dp->common_rates,
1069						    intel_dp->num_common_rates,
1070						    intel_dp->compliance.test_link_rate);
1071			if (index >= 0)
1072				limits->min_clock = limits->max_clock = index;
1073			limits->min_lane_count = limits->max_lane_count =
1074				intel_dp->compliance.test_lane_count;
1075		}
1076	}
1077}
1078
 
 
 
 
 
 
 
 
 
 
 
 
 
1079/* Optimize link config in order: max bpp, min clock, min lanes */
1080static int
1081intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1082				  struct intel_crtc_state *pipe_config,
1083				  const struct link_config_limits *limits)
1084{
1085	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1086	int bpp, clock, lane_count;
1087	int mode_rate, link_clock, link_avail;
1088
1089	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1090		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
1091
1092		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1093						   output_bpp);
1094
1095		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1096			for (lane_count = limits->min_lane_count;
1097			     lane_count <= limits->max_lane_count;
1098			     lane_count <<= 1) {
1099				link_clock = intel_dp->common_rates[clock];
1100				link_avail = intel_dp_max_data_rate(link_clock,
1101								    lane_count);
1102
1103				if (mode_rate <= link_avail) {
1104					pipe_config->lane_count = lane_count;
1105					pipe_config->pipe_bpp = bpp;
1106					pipe_config->port_clock = link_clock;
1107
1108					return 0;
1109				}
1110			}
1111		}
1112	}
1113
1114	return -EINVAL;
1115}
1116
1117static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
1118{
1119	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1120	int i, num_bpc;
1121	u8 dsc_bpc[3] = {0};
1122	u8 dsc_max_bpc;
1123
1124	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
1125	if (DISPLAY_VER(i915) >= 12)
1126		dsc_max_bpc = min_t(u8, 12, max_req_bpc);
1127	else
1128		dsc_max_bpc = min_t(u8, 10, max_req_bpc);
1129
1130	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1131						       dsc_bpc);
1132	for (i = 0; i < num_bpc; i++) {
1133		if (dsc_max_bpc >= dsc_bpc[i])
1134			return dsc_bpc[i] * 3;
1135	}
1136
1137	return 0;
1138}
1139
1140#define DSC_SUPPORTED_VERSION_MIN		1
1141
/*
 * Fill out crtc_state->dsc.config by combining source parameters with
 * the sink's DSC capability DPCD, then compute the DSC rate control
 * parameters.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* Source-side DSC parameters (fills the rest of vdsc_cfg). */
	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/* Major version comes straight from the sink's DP_DSC_REV field. */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	/* Minor version is clamped via min() with DSC_SUPPORTED_VERSION_MIN. */
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	/* RGB input conversion support, per the sink's color format caps. */
	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/*
	 * Clamp the line buffer depth to the maximum the negotiated DSC
	 * minor version allows (DSC 1.2 vs. DSC 1.1 maxima differ).
	 */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
1206
/*
 * Compute the DSC (and, for DP, FEC) configuration for @pipe_config.
 *
 * Currently picks max bpp / max link rate / max lane count from
 * @limits, derives compressed bpp and slice count from the sink's DSC
 * DPCD, decides whether two VDSC engines are needed, and fills out the
 * DSC parameters.
 *
 * Returns 0 on success, -EINVAL if DSC cannot be used for this config,
 * or the error from intel_dp_dsc_compute_params().
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	int pipe_bpp;
	int ret;

	/* FEC is only enabled for non-eDP links that support it. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * eDP: compressed bpp straight from the sink DPCD (value is
		 * in 1/16 bpp units, hence the >> 4), capped at the
		 * uncompressed pipe bpp.
		 */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive the limits from link and mode parameters. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay,
						    pipe_config->bigjoiner,
						    pipe_bpp);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay,
						     pipe_config->bigjoiner);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
	    pipe_config->bigjoiner) {
		if (pipe_config->dsc.slice_count < 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}

		pipe_config->dsc.dsc_split = true;
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
1312
 
 
 
 
 
 
 
 
/*
 * Compute the main DP link configuration (lane count, link rate, pipe
 * bpp) for @pipe_config: build the limits, try the uncompressed
 * slow-and-wide config first, and fall back to DSC when that fails,
 * when DSC is forced via debugfs, or when bigjoiner requires
 * compression (pre-display 13).
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
	limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/* Modes too fast or wider than 5120 need bigjoiner, if available. */
	if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
	     adjusted_mode->crtc_hdisplay > 5120) &&
	    intel_dp_can_bigjoiner(intel_dp))
		pipe_config->bigjoiner = true;

	/*
	 * Optimize for slow and wide for everything, because there are some
	 * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/*
	 * Pipe joiner needs compression upto display12 due to BW limitation. DG2
	 * onwards pipe joiner can be enabled without compression.
	 */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 &&
					      pipe_config->bigjoiner)) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	/* Log the chosen config; DSC and non-DSC report different bpp. */
	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}
1413
1414bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
1415				  const struct drm_connector_state *conn_state)
1416{
1417	const struct intel_digital_connector_state *intel_conn_state =
1418		to_intel_digital_connector_state(conn_state);
1419	const struct drm_display_mode *adjusted_mode =
1420		&crtc_state->hw.adjusted_mode;
1421
1422	/*
1423	 * Our YCbCr output is always limited range.
1424	 * crtc_state->limited_color_range only applies to RGB,
1425	 * and it must never be set for YCbCr or we risk setting
1426	 * some conflicting bits in PIPECONF which will mess up
1427	 * the colors on the monitor.
1428	 */
1429	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
1430		return false;
1431
1432	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1433		/*
1434		 * See:
1435		 * CEA-861-E - 5.1 Default Encoding Parameters
1436		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1437		 */
1438		return crtc_state->pipe_bpp != 18 &&
1439			drm_default_rgb_quant_range(adjusted_mode) ==
1440			HDMI_QUANTIZATION_RANGE_LIMITED;
1441	} else {
1442		return intel_conn_state->broadcast_rgb ==
1443			INTEL_BROADCAST_RGB_LIMITED;
1444	}
1445}
1446
1447static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
1448				    enum port port)
 
 
1449{
1450	if (IS_G4X(dev_priv))
1451		return false;
1452	if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
1453		return false;
 
 
 
 
 
 
 
 
1454
1455	return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1456}
1457
/*
 * Fill out the pixel encoding / colorimetry portion of a VSC SDP
 * (@vsc) from @crtc_state and the connector's Colorspace property,
 * per DP 1.4a spec, Tables 2-118 through 2-120.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the DRM Colorspace property to the DP colorimetry code. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
1537
1538static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
1539				     struct intel_crtc_state *crtc_state,
1540				     const struct drm_connector_state *conn_state)
1541{
1542	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
 
 
1543
1544	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
1545	if (crtc_state->has_psr)
1546		return;
 
1547
1548	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
1549		return;
 
 
 
 
 
 
 
 
 
 
1550
1551	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1552	vsc->sdp_type = DP_SDP_VSC;
1553	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1554					 &crtc_state->infoframes.vsc);
1555}
1556
1557void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
1558				  const struct intel_crtc_state *crtc_state,
1559				  const struct drm_connector_state *conn_state,
1560				  struct drm_dp_vsc_sdp *vsc)
1561{
1562	vsc->sdp_type = DP_SDP_VSC;
 
 
1563
1564	if (intel_dp->psr.psr2_enabled) {
1565		if (intel_dp->psr.colorimetry_support &&
1566		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
1567			/* [PSR2, +Colorimetry] */
1568			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1569							 vsc);
1570		} else {
1571			/*
1572			 * [PSR2, -Colorimetry]
1573			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
1574			 * 3D stereo + PSR/PSR2 + Y-coordinate.
1575			 */
1576			vsc->revision = 0x4;
1577			vsc->length = 0xe;
1578		}
1579	} else {
1580		/*
1581		 * [PSR1]
1582		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
1583		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
1584		 * higher).
1585		 */
1586		vsc->revision = 0x2;
1587		vsc->length = 0x8;
1588	}
 
1589}
1590
1591static void
1592intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
1593					    struct intel_crtc_state *crtc_state,
1594					    const struct drm_connector_state *conn_state)
 
 
1595{
1596	int ret;
1597	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1598	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
 
 
 
1599
1600	if (!conn_state->hdr_output_metadata)
1601		return;
1602
1603	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
 
1604
1605	if (ret) {
1606		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
1607		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1608	}
1609
1610	crtc_state->infoframes.enable |=
1611		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
1612}
1613
1614static void
1615intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
1616			     struct intel_crtc_state *pipe_config,
1617			     int output_bpp, bool constant_n)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1618{
1619	struct intel_connector *intel_connector = intel_dp->attached_connector;
1620	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1621	int pixel_clock;
 
 
 
 
 
1622
1623	if (pipe_config->vrr.enable)
 
 
1624		return;
1625
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1626	/*
1627	 * DRRS and PSR can't be enable together, so giving preference to PSR
1628	 * as it allows more power-savings by complete shutting down display,
1629	 * so to guarantee this, intel_dp_drrs_compute_config() must be called
1630	 * after intel_psr_compute_config().
1631	 */
1632	if (pipe_config->has_psr)
1633		return;
 
1634
1635	if (!intel_connector->panel.downclock_mode ||
1636	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
1637		return;
 
 
 
 
 
1638
1639	pipe_config->has_drrs = true;
1640
1641	pixel_clock = intel_connector->panel.downclock_mode->clock;
1642	if (pipe_config->splitter.enable)
1643		pixel_clock /= pipe_config->splitter.link_count;
1644
1645	intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
1646			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
1647			       constant_n, pipe_config->fec_enable);
1648
1649	/* FIXME: abstract this better */
1650	if (pipe_config->splitter.enable)
1651		pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
 
 
 
1652}
1653
/*
 * Main .compute_config() hook for DP encoders: determines the output
 * format, audio, panel fitting, the full link configuration (possibly
 * with DSC), MSO splitting, M/N values, and the VRR/PSR/DRRS/SDP
 * derived state.
 *
 * Returns 0 on success or a negative error code.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
							    adjusted_mode);

	if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Resolve audio enablement from port capability and user override. */
	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	/* eDP: force the panel's fixed mode and set up panel fitting. */
	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Reject mode flags and widths the hardware can't handle. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* The bpp carried on the link: compressed bpp when DSC is on. */
	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Each of the n links carries 1/n of the width plus the overlap. */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	/*
	 * Ordering matters below: DRRS must be computed after PSR (see
	 * intel_dp_drrs_compute_config()), and the VSC SDP after PSR since
	 * PSR owns the VSC SDP when enabled.
	 */
	intel_vrr_compute_config(pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
1769
1770void intel_dp_set_link_params(struct intel_dp *intel_dp,
1771			      int link_rate, int lane_count)
1772{
1773	intel_dp->link_trained = false;
1774	intel_dp->link_rate = link_rate;
1775	intel_dp->lane_count = lane_count;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1776}
1777
1778/* Enable backlight PWM and backlight PP control. */
1779void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
1780			    const struct drm_connector_state *conn_state)
1781{
1782	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
1783	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1784
1785	if (!intel_dp_is_edp(intel_dp))
1786		return;
1787
1788	drm_dbg_kms(&i915->drm, "\n");
1789
1790	intel_panel_enable_backlight(crtc_state, conn_state);
1791	intel_pps_backlight_on(intel_dp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1792}
1793
1794/* Disable backlight PP control and backlight PWM. */
1795void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
1796{
1797	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
1798	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1799
1800	if (!intel_dp_is_edp(intel_dp))
1801		return;
1802
1803	drm_dbg_kms(&i915->drm, "\n");
1804
1805	intel_pps_backlight_off(intel_dp);
1806	intel_panel_disable_backlight(old_conn_state);
1807}
1808
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1809static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
1810{
1811	/*
1812	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
1813	 * be capable of signalling downstream hpd with a long pulse.
1814	 * Whether or not that means D3 is safe to use is not clear,
1815	 * but let's assume so until proven otherwise.
1816	 *
1817	 * FIXME should really check all downstream ports...
1818	 */
1819	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
1820		drm_dp_is_branch(intel_dp->dpcd) &&
1821		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
1822}
1823
1824void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
1825					   const struct intel_crtc_state *crtc_state,
1826					   bool enable)
1827{
1828	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1829	int ret;
1830
1831	if (!crtc_state->dsc.compression_enable)
1832		return;
1833
1834	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
1835				 enable ? DP_DECOMPRESSION_EN : 0);
1836	if (ret < 0)
1837		drm_dbg_kms(&i915->drm,
1838			    "Failed to %s sink decompression state\n",
1839			    enabledisable(enable));
1840}
1841
1842static void
1843intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
1844{
1845	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1846	u8 oui[] = { 0x00, 0xaa, 0x01 };
1847	u8 buf[3] = { 0 };
1848
1849	/*
1850	 * During driver init, we want to be careful and avoid changing the source OUI if it's
1851	 * already set to what we want, so as to avoid clearing any state by accident
1852	 */
1853	if (careful) {
1854		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
1855			drm_err(&i915->drm, "Failed to read source OUI\n");
1856
1857		if (memcmp(oui, buf, sizeof(oui)) == 0)
1858			return;
1859	}
1860
1861	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
1862		drm_err(&i915->drm, "Failed to write source OUI\n");
1863}
1864
1865/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/*
		 * Keep the sink in D0 if we depend on it to signal
		 * downstream hpd; see downstream_hpd_needs_d0().
		 */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);

	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);

			/* drm_dp_dpcd_writeb() returns the byte count on success. */
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
1910
1911static bool
1912intel_dp_get_dpcd(struct intel_dp *intel_dp);
 
 
1913
1914/**
1915 * intel_dp_sync_state - sync the encoder state during init/resume
1916 * @encoder: intel encoder to sync
1917 * @crtc_state: state for the CRTC connected to the encoder
1918 *
1919 * Sync any state stored in the encoder wrt. HW state during driver init
1920 * and system resume.
1921 */
1922void intel_dp_sync_state(struct intel_encoder *encoder,
1923			 const struct intel_crtc_state *crtc_state)
 
 
 
 
 
 
 
 
 
 
1924{
1925	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
1926
1927	/*
1928	 * Don't clobber DPCD if it's been already read out during output
1929	 * setup (eDP) or detect.
1930	 */
1931	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
1932		intel_dp_get_dpcd(intel_dp);
 
 
 
 
 
 
 
1933
1934	intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
1935	intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
1936}
1937
/*
 * Returns true if the BIOS-programmed state is acceptable for fastset,
 * false if a full modeset must be forced instead.
 */
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/*
	 * If BIOS has set an unsupported or non-standard link rate for some
	 * reason force an encoder recompute and full modeset.
	 */
	if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
				crtc_state->port_clock) < 0) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
		crtc_state->uapi.connectors_changed = true;
		return false;
	}

	/*
	 * FIXME hack to force full modeset when DSC is being used.
	 *
	 * As long as we do not have full state readout and config comparison
	 * of crtc_state->dsc, we have no way to ensure reliable fastset.
	 * Remove once we have readout for DSC.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	/* PSR state is not read out either, so recompute it via a modeset. */
	if (CAN_PSR(intel_dp)) {
		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
		crtc_state->uapi.mode_changed = true;
		return false;
	}

	return true;
}
1976
1977static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
 
 
1978{
1979	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
 
 
 
 
 
 
 
1980
1981	/* Clear the cached register set to avoid using stale values */
 
 
 
 
 
1982
1983	memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
 
 
 
 
 
 
1984
1985	if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
1986			     intel_dp->pcon_dsc_dpcd,
1987			     sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
1988		drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
1989			DP_PCON_DSC_ENCODER);
1990
1991	drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
1992		    (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
 
 
 
1993}
1994
/*
 * Translate an FRL bandwidth mask into the highest bandwidth it
 * advertises, in Gbps. Returns 0 when no mask bit is set.
 */
static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
{
	static const int bw_gbps[] = {9, 18, 24, 32, 40, 48};
	int i;

	/* Highest set bit wins: scan the table from the top down. */
	for (i = (int)(sizeof(bw_gbps) / sizeof(bw_gbps[0])) - 1; i >= 0; i--) {
		if (frl_bw_mask & (1 << i))
			return bw_gbps[i];
	}

	return 0;
}
2006
2007static int intel_dp_pcon_set_frl_mask(int max_frl)
 
 
 
2008{
2009	switch (max_frl) {
2010	case 48:
2011		return DP_PCON_FRL_BW_MASK_48GBPS;
2012	case 40:
2013		return DP_PCON_FRL_BW_MASK_40GBPS;
2014	case 32:
2015		return DP_PCON_FRL_BW_MASK_32GBPS;
2016	case 24:
2017		return DP_PCON_FRL_BW_MASK_24GBPS;
2018	case 18:
2019		return DP_PCON_FRL_BW_MASK_18GBPS;
2020	case 9:
2021		return DP_PCON_FRL_BW_MASK_9GBPS;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2022	}
 
2023
2024	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2025}
2026
2027static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
 
 
2028{
2029	struct intel_connector *intel_connector = intel_dp->attached_connector;
2030	struct drm_connector *connector = &intel_connector->base;
2031	int max_frl_rate;
2032	int max_lanes, rate_per_lane;
2033	int max_dsc_lanes, dsc_rate_per_lane;
 
 
 
 
2034
2035	max_lanes = connector->display_info.hdmi.max_lanes;
2036	rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
2037	max_frl_rate = max_lanes * rate_per_lane;
2038
2039	if (connector->display_info.hdmi.dsc_cap.v_1p2) {
2040		max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
2041		dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
2042		if (max_dsc_lanes && dsc_rate_per_lane)
2043			max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
 
 
 
 
 
 
 
 
 
 
2044	}
2045
2046	return max_frl_rate;
 
 
 
 
 
 
 
 
2047}
2048
/*
 * Train the PCON's HDMI link in FRL mode: pick the lowest of the PCON's
 * and the sink's max FRL bandwidth, run the DPCD FRL configure/enable
 * sequence and verify the link came up in FRL mode. Returns 0 on
 * success, a negative error code otherwise.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	/* Start from a clean FRL configuration. */
	ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
	if (ret < 0)
		return ret;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	/* Train at the bandwidth both ends can sustain. */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Verify HDMI Link configuration shows FRL Mode */
	if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
	    DP_PCON_HDMI_MODE_FRL) {
		drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
		return -EINVAL;
	}
	drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);

	/* Cache the trained rate so we don't retrain needlessly. */
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}
 
2118
2119static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
2120{
2121	if (drm_dp_is_branch(intel_dp->dpcd) &&
2122	    intel_dp->has_hdmi_sink &&
2123	    intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
2124		return true;
2125
2126	return false;
 
 
 
 
 
2127}
2128
2129void intel_dp_check_frl_training(struct intel_dp *intel_dp)
 
2130{
2131	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2132
2133	/*
2134	 * Always go for FRL training if:
2135	 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
2136	 * -sink is HDMI2.1
2137	 */
2138	if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
2139	    !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
2140	    intel_dp->frl.is_trained)
 
 
2141		return;
2142
2143	if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
2144		int ret, mode;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2145
2146		drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
2147		ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
2148		mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
 
 
 
2149
2150		if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
2151			drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2152	} else {
2153		drm_dbg(&dev_priv->drm, "FRL training Completed\n");
 
 
 
 
 
 
 
 
 
 
2154	}
2155}
2156
2157static int
2158intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
2159{
2160	int vactive = crtc_state->hw.adjusted_mode.vdisplay;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2161
2162	return intel_hdmi_dsc_get_slice_height(vactive);
2163}
2164
2165static int
2166intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
2167			     const struct intel_crtc_state *crtc_state)
2168{
2169	struct intel_connector *intel_connector = intel_dp->attached_connector;
2170	struct drm_connector *connector = &intel_connector->base;
2171	int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
2172	int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
2173	int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
2174	int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
2175
2176	return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
2177					     pcon_max_slice_width,
2178					     hdmi_max_slices, hdmi_throughput);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2179}
2180
2181static int
2182intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
2183			  const struct intel_crtc_state *crtc_state,
2184			  int num_slices, int slice_width)
2185{
2186	struct intel_connector *intel_connector = intel_dp->attached_connector;
2187	struct drm_connector *connector = &intel_connector->base;
2188	int output_format = crtc_state->output_format;
2189	bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
2190	int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
2191	int hdmi_max_chunk_bytes =
2192		connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
2193
2194	return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
2195				      num_slices, output_format, hdmi_all_bpp,
2196				      hdmi_max_chunk_bytes);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2197}
2198
/*
 * Program the PCON's DSC encoder PPS override parameters (slice height,
 * slice width, bits per pixel) for an HDMI 2.1 FRL sink. Bails out
 * silently whenever DSC cannot or need not be used.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON encoder and the HDMI sink must support DSC 1.2. */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack the parameters little-endian: height, width, then bpp (10 bits). */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}
2253
/*
 * Configure a DP->HDMI/DVI protocol converter (branch device):
 * HDMI vs. DVI output mode, YCbCr 4:4:4 -> 4:2:0 downsampling, and
 * RGB -> YCbCr conversion colorimetry. Requires DPCD 1.3+ and a branch
 * device; otherwise this is a no-op.
 */
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 tmp;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
		return;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return;

	/* Select HDMI vs. DVI output configuration on the converter. */
	tmp = intel_dp->has_hdmi_sink ?
		DP_HDMI_DVI_OUTPUT_CONFIG : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
		drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
			    enabledisable(intel_dp->has_hdmi_sink));

	/* 4:4:4 -> 4:2:0 conversion in the converter, when needed and supported. */
	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
			    enabledisable(intel_dp->dfp.ycbcr_444_to_420));

	tmp = 0;
	if (intel_dp->dfp.rgb_to_ycbcr) {
		bool bt2020, bt709;

		/*
		 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only
		 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default.
		 *
		 */
		tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;

		bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								   intel_dp->downstream_ports,
								   DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
		bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								  intel_dp->downstream_ports,
								  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
		/* Upgrade from the BT601 default when the PCON supports it. */
		switch (crtc_state->infoframes.vsc.colorimetry) {
		case DP_COLORIMETRY_BT2020_RGB:
		case DP_COLORIMETRY_BT2020_YCC:
			if (bt2020)
				tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
			break;
		case DP_COLORIMETRY_BT709_YCC:
		case DP_COLORIMETRY_XVYCC_709:
			if (bt709)
				tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
			break;
		default:
			break;
		}
	}

	if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
		drm_dbg_kms(&i915->drm,
			   "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
			   enabledisable(tmp));
}
2321
 
 
 
 
 
 
 
 
 
 
 
 
 
2322
2323bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
2324{
2325	u8 dprx = 0;
2326
2327	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
2328			      &dprx) != 1)
2329		return false;
2330	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
2331}
2332
/* Cache the sink's DSC and FEC capability DPCD registers in intel_dp. */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
2371
2372static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
2373				     struct drm_display_mode *mode)
2374{
2375	struct intel_dp *intel_dp = intel_attached_dp(connector);
2376	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2377	int n = intel_dp->mso_link_count;
2378	int overlap = intel_dp->mso_pixel_overlap;
2379
2380	if (!mode || !n)
2381		return;
2382
2383	mode->hdisplay = (mode->hdisplay - overlap) * n;
2384	mode->hsync_start = (mode->hsync_start - overlap) * n;
2385	mode->hsync_end = (mode->hsync_end - overlap) * n;
2386	mode->htotal = (mode->htotal - overlap) * n;
2387	mode->clock *= n;
2388
2389	drm_mode_set_name(mode);
2390
2391	drm_dbg_kms(&i915->drm,
2392		    "[CONNECTOR:%d:%s] using generated MSO mode: ",
2393		    connector->base.base.id, connector->base.name);
2394	drm_mode_debug_printmodeline(mode);
2395}
2396
2397static void intel_edp_mso_init(struct intel_dp *intel_dp)
2398{
2399	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2400	u8 mso;
2401
2402	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
2403		return;
2404
2405	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
2406		drm_err(&i915->drm, "Failed to read MSO cap\n");
2407		return;
2408	}
2409
2410	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
2411	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
2412	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
2413		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
2414		mso = 0;
2415	}
2416
2417	if (mso) {
2418		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
2419			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
2420		if (!HAS_MSO(i915)) {
2421			drm_err(&i915->drm, "No source MSO support, disabling\n");
2422			mso = 0;
2423		}
2424	}
2425
2426	intel_dp->mso_link_count = mso;
2427	intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
2428}
2429
/*
 * One-time eDP DPCD initialization: read base caps, the eDP display
 * control registers, sink link rates, DSC caps (display ver 10+), the
 * source OUI and MSO configuration. Returns false if the base DPCD
 * read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		/* Pre-eDP-1.4 panels: always train with the max parameters. */
		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is terminated by the first zero entry. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (DISPLAY_VER(dev_priv) >= 10)
		intel_dp_get_dsc_sink_cap(intel_dp);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	intel_edp_mso_init(intel_dp);

	return true;
}
2520
2521static bool
2522intel_dp_has_sink_count(struct intel_dp *intel_dp)
2523{
2524	if (!intel_dp->attached_connector)
2525		return false;
2526
2527	return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
2528					  intel_dp->dpcd,
2529					  &intel_dp->desc);
2530}
2531
/*
 * (Re-)read the sink's DPCD caps, sink count and downstream port info.
 * Returns false when no usable sink is present (read failure or a
 * dongle with SINK_COUNT == 0).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}
2578
2579static bool
2580intel_dp_can_mst(struct intel_dp *intel_dp)
2581{
2582	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
 
 
 
 
 
2583
2584	return i915->params.enable_dp_mst &&
 
 
 
 
 
 
2585		intel_dp->can_mst &&
2586		drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
2587}
2588
/*
 * Decide whether this port operates in MST mode (source support, sink
 * cap and the enable_dp_mst module parameter all required) and update
 * the MST topology manager accordingly.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	/* Source (port) MST support is a hard requirement. */
	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
2612
2613static bool
2614intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2615{
2616	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
2617				sink_irq_vector, DP_DPRX_ESI_LEN) ==
2618		DP_DPRX_ESI_LEN;
2619}
2620
2621bool
2622intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
2623		       const struct drm_connector_state *conn_state)
2624{
2625	/*
2626	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
2627	 * of Color Encoding Format and Content Color Gamut], in order to
2628	 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
2629	 */
2630	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2631		return true;
2632
2633	switch (conn_state->colorspace) {
2634	case DRM_MODE_COLORIMETRY_SYCC_601:
2635	case DRM_MODE_COLORIMETRY_OPYCC_601:
2636	case DRM_MODE_COLORIMETRY_BT2020_YCC:
2637	case DRM_MODE_COLORIMETRY_BT2020_RGB:
2638	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
2639		return true;
2640	default:
2641		break;
2642	}
2643
2644	return false;
2645}
2646
/*
 * Pack a drm_dp_vsc_sdp into the raw dp_sdp wire format (DP 1.4a,
 * Table 2-119). Returns the packed length (sizeof(struct dp_sdp)) or
 * -ENOSPC if @size is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats  */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	/* Component bit depth encoded in DB17[3:0]. */
	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80;  /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}
2708
2709static ssize_t
2710intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
2711					 struct dp_sdp *sdp,
2712					 size_t size)
2713{
2714	size_t length = sizeof(struct dp_sdp);
2715	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
2716	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
2717	ssize_t len;
2718
2719	if (size < length)
2720		return -ENOSPC;
2721
2722	memset(sdp, 0, size);
2723
2724	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
2725	if (len < 0) {
2726		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
2727		return -ENOSPC;
2728	}
2729
2730	if (len != infoframe_size) {
2731		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
2732		return -ENOSPC;
2733	}
2734
2735	/*
2736	 * Set up the infoframe sdp packet for HDR static metadata.
2737	 * Prepare VSC Header for SU as per DP 1.4a spec,
2738	 * Table 2-100 and Table 2-101
2739	 */
2740
2741	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
2742	sdp->sdp_header.HB0 = 0;
2743	/*
2744	 * Packet Type 80h + Non-audio INFOFRAME Type value
2745	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
2746	 * - 80h + Non-audio INFOFRAME Type value
2747	 * - InfoFrame Type: 0x07
2748	 *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
2749	 */
2750	sdp->sdp_header.HB1 = drm_infoframe->type;
2751	/*
2752	 * Least Significant Eight Bits of (Data Byte Count – 1)
2753	 * infoframe_size - 1
2754	 */
2755	sdp->sdp_header.HB2 = 0x1D;
2756	/* INFOFRAME SDP Version Number */
2757	sdp->sdp_header.HB3 = (0x13 << 2);
2758	/* CTA Header Byte 2 (INFOFRAME Version Number) */
2759	sdp->db[0] = drm_infoframe->version;
2760	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
2761	sdp->db[1] = drm_infoframe->length;
2762	/*
2763	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
2764	 * HDMI_INFOFRAME_HEADER_SIZE
2765	 */
2766	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
2767	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
2768	       HDMI_DRM_INFOFRAME_SIZE);
2769
2770	/*
2771	 * Size of DP infoframe sdp packet for HDR static metadata consists of
2772	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
2773	 * - Two Data Blocks: 2 bytes
2774	 *    CTA Header Byte2 (INFOFRAME Version Number)
2775	 *    CTA Header Byte3 (Length of INFOFRAME)
2776	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
2777	 *
2778	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
2779	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
2780	 * will pad rest of the size.
2781	 */
2782	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
2783}
2784
2785static void intel_write_dp_sdp(struct intel_encoder *encoder,
2786			       const struct intel_crtc_state *crtc_state,
2787			       unsigned int type)
2788{
2789	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2790	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2791	struct dp_sdp sdp = {};
2792	ssize_t len;
2793
2794	if ((crtc_state->infoframes.enable &
2795	     intel_hdmi_infoframe_enable(type)) == 0)
2796		return;
2797
2798	switch (type) {
2799	case DP_SDP_VSC:
2800		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
2801					    sizeof(sdp));
2802		break;
2803	case HDMI_PACKET_TYPE_GAMUT_METADATA:
2804		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
2805							       &sdp, sizeof(sdp));
2806		break;
2807	default:
2808		MISSING_CASE(type);
2809		return;
2810	}
2811
2812	if (drm_WARN_ON(&dev_priv->drm, len < 0))
2813		return;
2814
2815	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
2816}
2817
2818void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
2819			    const struct intel_crtc_state *crtc_state,
2820			    struct drm_dp_vsc_sdp *vsc)
2821{
2822	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2823	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2824	struct dp_sdp sdp = {};
2825	ssize_t len;
2826
2827	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
2828
2829	if (drm_WARN_ON(&dev_priv->drm, len < 0))
2830		return;
2831
2832	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
2833					&sdp, len);
2834}
2835
/*
 * Enable or disable the DP SDPs for this transcoder.
 *
 * First clears all DIP enable bits in the video DIP control register
 * (leaving VSC alone while PSR owns it), then, when @enable is set,
 * re-writes the VSC and HDR-metadata SDPs selected by @crtc_state.
 */
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
			 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
			 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (!crtc_state->has_psr)
		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;

	intel_de_write(dev_priv, reg, val);
	/* Posting read so the disable lands before (re-)writing payloads. */
	intel_de_posting_read(dev_priv, reg);

	if (!enable)
		return;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!crtc_state->has_psr)
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
2865
/*
 * Unpack a raw VSC SDP from @buffer into the logical @vsc description.
 *
 * Validates the SDP header and only accepts the revision/length
 * combinations handled by this driver (PSR, PSR2, and PSR2 + Pixel
 * Encoding/Colorimetry Format).
 *
 * Returns 0 on success, -EINVAL on a short buffer or unrecognized header.
 */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* DB17[2:0]: component bit depth (inverse of the pack side) */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		/* Any other revision/length combination is unsupported. */
		return -EINVAL;
	}

	return 0;
}
2935
/*
 * Unpack an HDR static metadata infoframe SDP from @buffer into
 * @drm_infoframe, validating each header byte against the values the
 * pack side (intel_dp_hdr_metadata_infoframe_sdp_pack) writes.
 *
 * Returns 0 on success, -EINVAL on a short buffer or header mismatch,
 * or the error from hdmi_drm_infoframe_unpack_only().
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count - 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}
2981
2982static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
2983				  struct intel_crtc_state *crtc_state,
2984				  struct drm_dp_vsc_sdp *vsc)
2985{
2986	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
2987	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2988	unsigned int type = DP_SDP_VSC;
2989	struct dp_sdp sdp = {};
2990	int ret;
2991
2992	/* When PSR is enabled, VSC SDP is handled by PSR routine */
2993	if (crtc_state->has_psr)
2994		return;
2995
2996	if ((crtc_state->infoframes.enable &
2997	     intel_hdmi_infoframe_enable(type)) == 0)
2998		return;
2999
3000	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
3001
3002	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
3003
3004	if (ret)
3005		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
3006}
3007
3008static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
3009						     struct intel_crtc_state *crtc_state,
3010						     struct hdmi_drm_infoframe *drm_infoframe)
3011{
3012	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3013	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3014	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
3015	struct dp_sdp sdp = {};
3016	int ret;
3017
3018	if ((crtc_state->infoframes.enable &
3019	    intel_hdmi_infoframe_enable(type)) == 0)
3020		return;
3021
3022	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
3023				 sizeof(sdp));
3024
3025	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
3026							 sizeof(sdp));
3027
3028	if (ret)
3029		drm_dbg_kms(&dev_priv->drm,
3030			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
3031}
3032
3033void intel_read_dp_sdp(struct intel_encoder *encoder,
3034		       struct intel_crtc_state *crtc_state,
3035		       unsigned int type)
3036{
3037	if (encoder->type != INTEL_OUTPUT_DDI)
3038		return;
3039
3040	switch (type) {
3041	case DP_SDP_VSC:
3042		intel_read_dp_vsc_sdp(encoder, crtc_state,
3043				      &crtc_state->infoframes.vsc);
3044		break;
3045	case HDMI_PACKET_TYPE_GAMUT_METADATA:
3046		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
3047							 &crtc_state->infoframes.drm.drm);
3048		break;
3049	default:
3050		MISSING_CASE(type);
3051		break;
3052	}
3053}
3054
/*
 * Handle a DP compliance link-training test request (DP CTS 1.2, 4.3.1.11).
 *
 * Reads the requested lane count and link rate from the sink's DPCD,
 * validates them against this source's capabilities, and stashes them in
 * intel_dp->compliance for the subsequent retrain.
 *
 * Returns DP_TEST_ACK on success, DP_TEST_NAK on AUX failure or an
 * unsupported link parameter request.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
3092
/*
 * Handle a DP compliance video-pattern test request (DP CTS 3.1.5).
 *
 * Reads the requested pattern, resolution, and misc parameters from the
 * sink's DPCD. Only the color-ramp pattern with RGB/VESA range at 6 or
 * 8 bpc is supported; everything else is NAKed. On success the request
 * is recorded in intel_dp->compliance and test_active is set.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height; /* DPCD values are big-endian */
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB color format with VESA dynamic range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
3154
/*
 * Handle a DP compliance EDID-read test request.
 *
 * If the prior EDID read failed (NACKs, DEFERs, or corruption), request
 * the failsafe resolution; otherwise write the checksum of the last EDID
 * block back to the sink and request the preferred resolution. Always
 * marks the test active so userspace doesn't interfere.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
3201
3202static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
3203					const struct intel_crtc_state *crtc_state)
3204{
3205	struct drm_i915_private *dev_priv =
3206			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3207	struct drm_dp_phy_test_params *data =
3208			&intel_dp->compliance.test_data.phytest;
3209	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3210	enum pipe pipe = crtc->pipe;
3211	u32 pattern_val;
3212
3213	switch (data->phy_pattern) {
3214	case DP_PHY_TEST_PATTERN_NONE:
3215		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
3216		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
3217		break;
3218	case DP_PHY_TEST_PATTERN_D10_2:
3219		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
3220		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3221			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
3222		break;
3223	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
3224		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
3225		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3226			       DDI_DP_COMP_CTL_ENABLE |
3227			       DDI_DP_COMP_CTL_SCRAMBLED_0);
3228		break;
3229	case DP_PHY_TEST_PATTERN_PRBS7:
3230		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
3231		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3232			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
3233		break;
3234	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
3235		/*
3236		 * FIXME: Ideally pattern should come from DPCD 0x250. As
3237		 * current firmware of DPR-100 could not set it, so hardcoding
3238		 * now for complaince test.
3239		 */
3240		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
3241		pattern_val = 0x3e0f83e0;
3242		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
3243		pattern_val = 0x0f83e0f8;
3244		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
3245		pattern_val = 0x0000f83e;
3246		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
3247		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3248			       DDI_DP_COMP_CTL_ENABLE |
3249			       DDI_DP_COMP_CTL_CUSTOM80);
3250		break;
3251	case DP_PHY_TEST_PATTERN_CP2520:
3252		/*
3253		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
3254		 * current firmware of DPR-100 could not set it, so hardcoding
3255		 * now for complaince test.
3256		 */
3257		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
3258		pattern_val = 0xFB;
3259		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
3260			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
3261			       pattern_val);
3262		break;
3263	default:
3264		WARN(1, "Invalid Phy Test Pattern\n");
3265	}
3266}
3267
/*
 * Temporarily disable the transcoder/pipe/DP transport for PHY compliance
 * testing: read-modify-write the DDI function control, pipe config, and
 * DP TP control registers with their enable bits cleared.
 *
 * Paired with intel_dp_autotest_phy_ddi_enable() below.
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	/* Clear the enable bits and the DDI port selection. */
	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}
3294
/*
 * Re-enable the transcoder/pipe/DP transport after PHY compliance
 * testing: the inverse of intel_dp_autotest_phy_ddi_disable(), restoring
 * the enable bits and re-selecting this port on the DDI.
 */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	/* Note: write order differs from the disable path (DDI last). */
	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}
3322
3323static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
3324					 const struct intel_crtc_state *crtc_state)
3325{
3326	struct drm_dp_phy_test_params *data =
3327		&intel_dp->compliance.test_data.phytest;
3328	u8 link_status[DP_LINK_STATUS_SIZE];
3329
3330	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
3331					     link_status) < 0) {
3332		DRM_DEBUG_KMS("failed to get link status\n");
3333		return;
3334	}
3335
3336	/* retrieve vswing & pre-emphasis setting */
3337	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
3338				  link_status);
3339
3340	intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
3341
3342	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
3343
3344	intel_dp_phy_pattern_update(intel_dp, crtc_state);
3345
3346	intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
3347
3348	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
3349				    link_status[DP_DPCD_REV]);
3350}
3351
3352static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
3353{
3354	struct drm_dp_phy_test_params *data =
3355		&intel_dp->compliance.test_data.phytest;
3356
3357	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
3358		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
3359		return DP_TEST_NAK;
3360	}
3361
3362	/* Set test active flag here so userspace doesn't interrupt things */
3363	intel_dp->compliance.test_active = true;
3364
3365	return DP_TEST_ACK;
3366}
3367
/*
 * Service a sink-initiated compliance test request (TEST_REQ).
 *
 * Reads DP_TEST_REQUEST, dispatches to the matching autotest handler,
 * records the test type on ACK, and writes the ACK/NAK response back to
 * DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	/* Remember which test was ACKed so later code can act on it. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}
3414
3415static void
3416intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
3417{
3418		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
3419
3420		if (esi[1] & DP_CP_IRQ) {
3421			intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
3422			*handled = true;
3423		}
3424}
3425
/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Loop until no more serviced interrupts remain or ESI read fails. */
	for (;;) {
		/*
		 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
		 * pass in "esi+10" to drm_dp_channel_eq_ok(), which
		 * takes a 6-byte array. So we actually need 16 bytes
		 * here.
		 *
		 * Somebody who knows what the limits actually are
		 * should check this, but for now this is at least
		 * harmless and avoids a valid compiler warning about
		 * using more of the array than we have allocated.
		 */
		u8 esi[DP_DPRX_ESI_LEN+2] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		intel_dp_mst_hpd_irq(intel_dp, esi, &handled);

		/* Nothing serviced this pass: no more interrupts to ack. */
		if (!handled)
			break;

		/* Ack the serviced event bits; retry the write a few times. */
		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}
3499
3500static void
3501intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
3502{
3503	bool is_active;
3504	u8 buf = 0;
3505
3506	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
3507	if (intel_dp->frl.is_trained && !is_active) {
3508		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
3509			return;
3510
3511		buf &=  ~DP_PCON_ENABLE_HDMI_LINK;
3512		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
3513			return;
3514
3515		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
3516
3517		/* Restart FRL training or fall back to TMDS mode */
3518		intel_dp_check_frl_training(intel_dp);
3519	}
3520}
3521
/*
 * Decide whether the DP link needs retraining.
 *
 * Returns true only when the link was previously trained, PSR is not in
 * control of the main link, the cached link parameters are still valid,
 * and the sink reports channel EQ/CR failure.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will
	 * fail as the link may not be on, or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
3560
3561static bool intel_dp_has_connector(struct intel_dp *intel_dp,
3562				   const struct drm_connector_state *conn_state)
3563{
3564	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3565	struct intel_encoder *encoder;
3566	enum pipe pipe;
3567
3568	if (!conn_state->best_encoder)
3569		return false;
3570
3571	/* SST */
3572	encoder = &dp_to_dig_port(intel_dp)->base;
3573	if (conn_state->best_encoder == &encoder->base)
3574		return true;
3575
3576	/* MST */
3577	for_each_pipe(i915, pipe) {
3578		encoder = &intel_dp->mst_encoders[pipe]->base;
3579		if (conn_state->best_encoder == &encoder->base)
3580			return true;
3581	}
3582
3583	return false;
3584}
3585
/*
 * Prepare for a link retrain: collect (into *@crtc_mask) the crtcs that
 * are actively driven by this DP port, taking each crtc's modeset lock
 * via @ctx along the way.
 *
 * Returns 0 on success or a modeset-lock error (e.g. -EDEADLK, which the
 * caller's acquire context handles by backing off and retrying).
 * *@crtc_mask is left at 0 when no retrain is needed.
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip crtcs with a commit still in flight. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Re-check under the locks; the link may have recovered meanwhile. */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}
3638
3639static bool intel_dp_is_connected(struct intel_dp *intel_dp)
3640{
3641	struct intel_connector *connector = intel_dp->attached_connector;
3642
3643	return connector->base.status == connector_status_connected ||
3644		intel_dp->is_mst;
3645}
3646
/*
 * Retrain the DP link on all active crtcs driven by this encoder.
 *
 * Runs in three passes over the affected crtcs: (1) suppress FIFO
 * underrun reporting, (2) retrain once on the (MST master) transcoder,
 * (3) wait a vblank and re-enable underrun reporting.
 *
 * Must be called with a modeset acquire context; returns 0 or a
 * modeset-lock error (e.g. -EDEADLK for the caller to back off on).
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	/* No active crtcs need retraining. */
	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		/* One retrain covers the whole link; stop after the first. */
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
3717
/*
 * Collect into @crtc_mask all active crtcs driven by @intel_dp on which a
 * PHY compliance test can be executed, taking each crtc's lock via @ctx.
 *
 * Returns 0 on success or a negative error code from drm_modeset_lock()
 * (e.g. -EDEADLK, in which case the caller must back off and retry).
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Only connectors driven by this DP encoder are relevant. */
		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip crtcs whose commit hasn't completed in hardware yet. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
3764
/*
 * Run a single DP PHY compliance test attempt on @encoder under @ctx.
 *
 * Returns 0 on success (including when no eligible crtc was found) or a
 * negative error code (e.g. -EDEADLK from lock contention).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	/* No active crtc to run the test on. */
	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		/* The link is shared; running the test once is enough. */
		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}
3805
3806void intel_dp_phy_test(struct intel_encoder *encoder)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3807{
3808	struct drm_modeset_acquire_ctx ctx;
 
3809	int ret;
3810
 
 
3811	drm_modeset_acquire_init(&ctx, 0);
3812
3813	for (;;) {
3814		ret = intel_dp_do_phy_test(encoder, &ctx);
3815
3816		if (ret == -EDEADLK) {
3817			drm_modeset_backoff(&ctx);
3818			continue;
3819		}
3820
3821		break;
3822	}
3823
3824	drm_modeset_drop_locks(&ctx);
3825	drm_modeset_acquire_fini(&ctx);
3826	drm_WARN(encoder->base.dev, ret,
3827		 "Acquiring modeset locks failed with %i\n", ret);
 
 
 
 
 
 
 
 
3828}
3829
/*
 * Read, acknowledge and dispatch the sink's DEVICE_SERVICE_IRQ_VECTOR.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	/* The IRQ vector register only exists on DPCD 1.1+. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	/* Bail if the read failed or no IRQ bit is pending. */
	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack by writing the pending bits back before handling them. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
3853
/*
 * Read, acknowledge and dispatch the sink's LINK_SERVICE_IRQ_VECTOR_ESI0.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	/* The IRQ vector register only exists on DPCD 1.1+. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	/* Bail if the read failed or no IRQ bit is pending. */
	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return;

	/* Don't handle anything we couldn't acknowledge. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}
3872
3873/*
3874 * According to DP spec
3875 * 5.1.2:
3876 *  1. Read DPCD
3877 *  2. Configure link according to Receiver Capabilities
3878 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3879 *  4. Check link status on receipt of hot-plug interrupt
3880 *
3881 * intel_dp_short_pulse -  handles short pulse interrupts
3882 * when full detection is not required.
3883 * Returns %true if short pulse is handled and full detection
3884 * is NOT required and %false otherwise.
3885 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Handle and ack any pending sink service IRQs. */
	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	/* Short pulse fully handled; no full detect needed. */
	return true;
}
3945
3946/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status by probing the sink's DPCD, taking branch
 * devices (downstream ports) into account.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP uses edp_detect() instead; getting here is a driver bug. */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	/* An MST-capable branch device counts as connected. */
	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Pre-1.1 DPCD only has the coarse downstream port type. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
3999
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	/* eDP panels can't be unplugged, so always report connected. */
	return connector_status_connected;
}
4005
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4006/*
4007 * intel_digital_port_connected - is the specified port connected?
4008 * @encoder: intel_encoder
4009 *
4010 * In cases where there's a connector physically connected but it can't be used
4011 * by our hardware we also return false, since the rest of the driver should
4012 * pretty much treat the port as disconnected. This is relevant for type-C
4013 * (starting on ICL) where there's ownership involved.
4014 *
4015 * Return %true if port is connected, %false otherwise.
4016 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4017bool intel_digital_port_connected(struct intel_encoder *encoder)
4018{
4019	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4020	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4021	bool is_connected = false;
4022	intel_wakeref_t wakeref;
4023
4024	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
4025		is_connected = dig_port->connected(encoder);
4026
4027	return is_connected;
4028}
4029
4030static struct edid *
4031intel_dp_get_edid(struct intel_dp *intel_dp)
4032{
4033	struct intel_connector *intel_connector = intel_dp->attached_connector;
4034
4035	/* use cached edid if we have one */
4036	if (intel_connector->edid) {
4037		/* invalid edid */
4038		if (IS_ERR(intel_connector->edid))
4039			return NULL;
4040
4041		return drm_edid_duplicate(intel_connector->edid);
4042	} else
4043		return drm_get_edid(&intel_connector->base,
4044				    &intel_dp->aux.ddc);
4045}
4046
/*
 * Cache the downstream facing port (DFP) capabilities - max bpc, dotclock,
 * TMDS clock range and PCON FRL bandwidth - derived from the branch
 * device's DPCD and the sink's @edid (may be NULL).
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}
4086
/*
 * Work out the YCbCr 4:2:0 output strategy for this sink: passthrough,
 * 4:4:4->4:2:0 conversion in the DFP, or RGB->YCbCr conversion in a PCON,
 * and update the connector's ycbcr_420_allowed flag accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severly corrupted. SNB+ is fine.
	 */
	if (IS_IRONLAKE(i915))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
								 intel_dp->downstream_ports,
								 DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
								 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);

	if (DISPLAY_VER(i915) >= 11) {
		/* Let PCON convert from RGB->YCbCr if possible */
		if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
			intel_dp->dfp.rgb_to_ycbcr = true;
			intel_dp->dfp.ycbcr_444_to_420 = true;
			connector->base.ycbcr_420_allowed = true;
		} else {
		/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
			intel_dp->dfp.ycbcr_444_to_420 =
				ycbcr_444_to_420 && !ycbcr_420_passthrough;

			connector->base.ycbcr_420_allowed =
				!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
		}
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(intel_dp->dfp.rgb_to_ycbcr),
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}
4148
/*
 * (Re)read the sink's EDID, cache it on the connector and refresh all
 * state derived from it: DFP limits, YCbCr 4:2:0 support, HDMI sink /
 * audio flags and the CEC adapter's EDID.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	/* Drop any previously cached state before re-reading. */
	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	/* edid may be NULL here; the helpers handle that. */
	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}
4169
/*
 * Free the cached detect-time EDID and reset all state that was derived
 * from it (HDMI/audio flags, DFP limits, YCbCr 4:2:0 support).
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	/* Detach the EDID from the CEC adapter before freeing it. */
	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}
4192
/*
 * Connector ->detect_ctx hook: probe the sink, refresh DPCD/EDID derived
 * state and return the drm_connector_status, or a negative error code
 * (e.g. -EDEADLK) that the probe helper will retry.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget stale compliance and DSC capability state. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (DISPLAY_VER(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
4313
/*
 * Connector ->force hook: re-read the EDID of a connector that userspace
 * has forced connected, holding the AUX power domain for the read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
4338
/*
 * Connector ->get_modes hook: populate the mode list from the cached
 * detect-time EDID, the eDP fixed mode, or (failing both) a mode derived
 * from the downstream port caps. Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;
	int num_modes = 0;

	edid = intel_connector->detect_edid;
	if (edid) {
		num_modes = intel_connector_update_modes(connector, edid);

		if (intel_vrr_is_capable(connector))
			drm_connector_set_vrr_capable_property(connector,
							       true);
	}

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	if (num_modes)
		return num_modes;

	/* No EDID at all: fall back to a downstream-port derived mode. */
	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}
4385
/*
 * Connector ->late_register hook: register the AUX channel and CEC adapter
 * once the connector's kdev exists, and set up LSPCON HDR support where
 * the VBT says an LSPCON is present.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	/* The AUX device parent is only known after connector registration. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_object_attach_property(&connector->base,
						   connector->dev->mode_config.hdr_output_metadata_property,
						   0);
	}

	return ret;
}
4424
/*
 * Connector ->early_unregister hook: unwind intel_dp_connector_register()
 * in reverse order (CEC, then AUX, then the connector itself).
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
4434
/*
 * Encoder teardown helper: clean up MST state, sync any outstanding panel
 * VDD-off work and release the AUX channel resources.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_pps_vdd_off_sync(intel_dp);

	intel_dp_aux_fini(intel_dp);
}
4446
 
 
 
 
 
 
 
 
/*
 * Encoder suspend hook: synchronize with panel VDD-off handling before
 * the system suspends.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_vdd_off_sync(intel_dp);
}
4453
/*
 * Encoder shutdown hook: wait out the panel power cycle delay so a
 * subsequent power-on respects the panel's power sequencing.
 */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);

	intel_pps_wait_power_cycle(intel_dp);
}
4460
/*
 * Pull every connector of tile group @tile_group_id (and its crtc's planes)
 * into the atomic @state and flag those crtcs for a full modeset, so all
 * tiles of a tiled display are modeset together.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
4502
/*
 * Flag every enabled crtc using one of @transcoders (a bitmask of
 * cpu_transcoder bits) for a full modeset, adding its connectors and
 * planes to the atomic @state.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Track which requested transcoders have been matched. */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	/* Every requested transcoder should have had a matching crtc. */
	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
 
4542
/*
 * For a connector that was part of a port sync group, flag all crtcs of
 * the group (the master transcoder and its slaves) for a modeset so they
 * are torn down / rebuilt together.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	/* Collect the whole sync group: slaves plus the master (if any). */
	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}
4568
/*
 * Connector ->atomic_check hook: run the common digital connector check,
 * then make sure tiled displays and port-synced crtcs are modeset as a
 * group when this connector needs a modeset.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (DISPLAY_VER(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	/* Modeset all tiles of a tiled display together. */
	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
4598
/* drm_connector vfuncs shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
4610
/* Probe/modeset helper vfuncs for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
4617
/*
 * intel_dp_hpd_pulse - handle a long/short HPD pulse on a DP port
 * @dig_port: the digital port the pulse arrived on
 * @long_hpd: %true for a long pulse, %false for a short pulse
 *
 * Returns %IRQ_HANDLED when the pulse was fully dealt with here, or
 * %IRQ_NONE when further (full detect / hotplug) processing is needed.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_pps_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	/* Long pulse: defer to a full detect with fresh link params. */
	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4659
4660/* check the VBT to see whether the eDP is on another port */
4661bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
4662{
4663	/*
4664	 * eDP not supported on g4x. so bail out early just
4665	 * for a bit extra safety in case the VBT is bonkers.
4666	 */
4667	if (DISPLAY_VER(dev_priv) < 5)
4668		return false;
4669
4670	if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
4671		return true;
4672
4673	return intel_bios_is_port_edp(dev_priv, port);
4674}
4675
/*
 * Attach the drm properties supported by this connector. Which properties
 * apply depends on the platform (GMCH vs. newer), the port and the output
 * type (eDP vs. DP, lspcon present or not).
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	/* The DP subconnector property only makes sense for external DP. */
	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	/* max_bpc range differs between GMCH (6..10) and gen5+ (6..12). */
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (DISPLAY_VER(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	/* Register HDMI colorspace for case of lspcon */
	if (intel_bios_is_lspcon_present(dev_priv, port)) {
		drm_connector_attach_content_type_property(connector);
		intel_attach_hdmi_colorspace_property(connector);
	} else {
		intel_attach_dp_colorspace_property(connector);
	}

	if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		/* Centered (unscaled) mode is only available on non-GMCH. */
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		/* Aspect-preserving scaling is the default for panels. */
		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}

	if (HAS_VRR(dev_priv))
		drm_connector_attach_vrr_capable_property(connector);
}
4723
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclock rate selects the low-RR set. */
	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		/* Gen8+ (except CHV): switch RR by selecting the M/N set. */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (DISPLAY_VER(dev_priv) > 6) {
		/* Gen7/CHV: toggle the RR mode switch bit in PIPECONF. */
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}
4820
4821static void
4822intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
4823{
4824	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4825
4826	dev_priv->drrs.busy_frontbuffer_bits = 0;
4827	dev_priv->drrs.dp = intel_dp;
4828}
4829
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs)
		return;

	drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");

	mutex_lock(&dev_priv->drrs.mutex);

	if (dev_priv->drrs.dp) {
		/* Only one eDP panel can own DRRS at a time. */
		drm_warn(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	intel_edp_drrs_enable_locked(intel_dp);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
4859
4860static void
4861intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
4862			      const struct intel_crtc_state *crtc_state)
4863{
4864	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4865
4866	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
4867		int refresh;
4868
4869		refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
4870		intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
4871	}
4872
4873	dev_priv->drrs.dp = NULL;
4874}
4875
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 *
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		/* DRRS was never enabled (or is already disabled). */
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Don't leave the idleness downclock work pending. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
4901
/**
 * intel_edp_drrs_update - Update DRRS state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update DRRS states, disabling or enabling DRRS when
 * executing fastsets. For full modeset, intel_edp_drrs_disable() and
 * intel_edp_drrs_enable() should be called instead.
 */
void
intel_edp_drrs_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	mutex_lock(&dev_priv->drrs.mutex);

	/* New state matches current one? */
	if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
		goto unlock;

	if (crtc_state->has_drrs)
		intel_edp_drrs_enable_locked(intel_dp);
	else
		intel_edp_drrs_disable_locked(intel_dp, crtc_state);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
4934
/* Idleness work: switch to the low refresh rate once the screen is idle. */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
4966
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes start.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Activity means no downclocking for now; stop the pending idle work. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track dirty bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}
5010
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart the idleness timer from scratch below. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* These planes are no longer busy. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means the screen was just redrawn (busy) hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5063
5064/**
5065 * DOC: Display Refresh Rate Switching (DRRS)
5066 *
 * Display Refresh Rate Switching (DRRS) is a power-conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
5071 *
5072 * Indication that the panel supports DRRS is given by the panel EDID, which
5073 * would list multiple refresh rates for one resolution.
5074 *
5075 * DRRS is of 2 types - static and seamless.
5076 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5077 * (may appear as a blink on screen) and is used in dock-undock scenario.
5078 * Seamless DRRS involves changing RR without any visual effect to the user
5079 * and can be used during normal system usage. This is done by programming
5080 * certain registers.
5081 *
5082 * Support for static/seamless DRRS may be indicated in the VBT based on
5083 * inputs from the panel spec.
5084 *
5085 * DRRS saves power by switching to low RR based on usage scenarios.
5086 *
5087 * The implementation is based on frontbuffer tracking implementation.  When
5088 * there is a disturbance on the screen triggered by user activity or a periodic
5089 * system activity, DRRS is disabled (RR is changed to high RR).  When there is
5090 * no movement on screen, after a timeout of 1 second, a switch to low RR is
5091 * made.
5092 *
5093 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5094 * and intel_edp_drrs_flush() are called.
5095 *
5096 * DRRS can be further extended to support other internal panels and also
5097 * the scenario of video playback wherein RR is set based on the rate
5098 * requested by userspace.
5099 */
5100
5101/**
5102 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5103 * @connector: eDP connector
5104 * @fixed_mode: preferred mode of panel
5105 *
5106 * This function is  called only once at driver load to initialize basic
5107 * DRRS stuff.
5108 *
5109 * Returns:
5110 * Downclock mode if panel supports it, else return NULL.
5111 * DRRS support is determined by the presence of downclock mode (apart
5112 * from VBT setting).
5113 */
5114static struct drm_display_mode *
5115intel_dp_drrs_init(struct intel_connector *connector,
5116		   struct drm_display_mode *fixed_mode)
5117{
5118	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
5119	struct drm_display_mode *downclock_mode = NULL;
5120
5121	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5122	mutex_init(&dev_priv->drrs.mutex);
5123
5124	if (DISPLAY_VER(dev_priv) <= 6) {
5125		drm_dbg_kms(&dev_priv->drm,
5126			    "DRRS supported for Gen7 and above\n");
5127		return NULL;
5128	}
5129
5130	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5131		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
5132		return NULL;
5133	}
5134
5135	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
5136	if (!downclock_mode) {
5137		drm_dbg_kms(&dev_priv->drm,
5138			    "Downclock mode is not found. DRRS not supported\n");
5139		return NULL;
5140	}
5141
5142	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5143
5144	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5145	drm_dbg_kms(&dev_priv->drm,
5146		    "seamless DRRS supported for eDP panel.\n");
5147	return downclock_mode;
5148}
5149
/*
 * eDP-specific connector setup: cache DPCD and EDID, determine the fixed
 * (and optional downclock) panel mode, and initialize panel/backlight
 * state. Returns false when no usable eDP panel is present.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	struct edid *edid;

	/* Nothing to do for external DP. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_pps_init(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
		} else {
			/* EDID present but unusable: record the failure. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* multiply the mode clock and horizontal timings for MSO */
	intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
	intel_edp_mso_mode_fixup(intel_connector, downclock_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps.pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_pps_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}
5255
5256static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
5257{
5258	struct intel_connector *intel_connector;
5259	struct drm_connector *connector;
5260
5261	intel_connector = container_of(work, typeof(*intel_connector),
5262				       modeset_retry_work);
5263	connector = &intel_connector->base;
5264	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
5265		      connector->name);
5266
5267	/* Grab the locks before changing connector property*/
5268	mutex_lock(&connector->dev->mode_config.mutex);
5269	/* Set connector link status to BAD and send a Uevent to notify
5270	 * userspace to do a modeset.
5271	 */
5272	drm_connector_set_link_status_property(connector,
5273					       DRM_MODE_LINK_STATUS_BAD);
5274	mutex_unlock(&connector->dev->mode_config.mutex);
5275	/* Send Hotplug uevent so userspace can reprobe */
5276	drm_kms_helper_hotplug_event(connector->dev);
5277}
5278
/*
 * Create and initialize the drm connector for a DP/eDP digital port.
 * Returns false on failure; on error the connector is cleaned up here.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* No eDP panel found: undo aux/MST setup before bailing. */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
5405
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5406void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
5407{
5408	struct intel_encoder *encoder;
5409
5410	if (!HAS_DISPLAY(dev_priv))
5411		return;
5412
5413	for_each_intel_encoder(&dev_priv->drm, encoder) {
5414		struct intel_dp *intel_dp;
5415
5416		if (encoder->type != INTEL_OUTPUT_DDI)
5417			continue;
5418
5419		intel_dp = enc_to_intel_dp(encoder);
5420
5421		if (!intel_dp->can_mst)
5422			continue;
5423
5424		if (intel_dp->is_mst)
5425			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
5426	}
5427}
5428
5429void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
5430{
5431	struct intel_encoder *encoder;
5432
5433	if (!HAS_DISPLAY(dev_priv))
5434		return;
5435
5436	for_each_intel_encoder(&dev_priv->drm, encoder) {
5437		struct intel_dp *intel_dp;
5438		int ret;
5439
5440		if (encoder->type != INTEL_OUTPUT_DDI)
5441			continue;
5442
5443		intel_dp = enc_to_intel_dp(encoder);
5444
5445		if (!intel_dp->can_mst)
5446			continue;
5447
5448		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
5449						     true);
5450		if (ret) {
5451			intel_dp->is_mst = false;
5452			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5453							false);
5454		}
5455	}
5456}
v5.4
   1/*
   2 * Copyright © 2008 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 * Authors:
  24 *    Keith Packard <keithp@keithp.com>
  25 *
  26 */
  27
  28#include <linux/export.h>
  29#include <linux/i2c.h>
  30#include <linux/notifier.h>
  31#include <linux/reboot.h>
  32#include <linux/slab.h>
  33#include <linux/types.h>
  34
  35#include <asm/byteorder.h>
  36
  37#include <drm/drm_atomic_helper.h>
  38#include <drm/drm_crtc.h>
  39#include <drm/drm_dp_helper.h>
  40#include <drm/drm_edid.h>
  41#include <drm/drm_hdcp.h>
  42#include <drm/drm_probe_helper.h>
  43#include <drm/i915_drm.h>
  44
 
  45#include "i915_debugfs.h"
  46#include "i915_drv.h"
  47#include "i915_trace.h"
  48#include "intel_atomic.h"
  49#include "intel_audio.h"
  50#include "intel_connector.h"
  51#include "intel_ddi.h"
 
  52#include "intel_display_types.h"
  53#include "intel_dp.h"
 
 
  54#include "intel_dp_link_training.h"
  55#include "intel_dp_mst.h"
  56#include "intel_dpio_phy.h"
 
  57#include "intel_fifo_underrun.h"
  58#include "intel_hdcp.h"
  59#include "intel_hdmi.h"
  60#include "intel_hotplug.h"
  61#include "intel_lspcon.h"
  62#include "intel_lvds.h"
  63#include "intel_panel.h"
 
  64#include "intel_psr.h"
  65#include "intel_sideband.h"
  66#include "intel_tc.h"
  67#include "intel_vdsc.h"
 
  68
/* Bytes of the DPRX Event Status Indicator read per MST interrupt. */
#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
  89
/* Pairs a DP link clock value with the DPLL divider settings producing it. */
struct dp_link_dpll {
	int clock;		/* link clock this entry applies to */
	struct dpll dpll;	/* divider settings for that clock */
};
  94
/* Per-platform DPLL settings for the two fixed DP link clocks (162/270). */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
 131
 132/* Constants for DP DSC configurations */
 133static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
 134
 135/* With Single pipe configuration, HW is capable of supporting maximum
 136 * of 4 slices per line.
 137 */
 138static const u8 valid_dsc_slicecount[] = {1, 2, 4};
 139
 140/**
 141 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 142 * @intel_dp: DP struct
 143 *
 144 * If a CPU or PCH DP output is attached to an eDP panel, this function
 145 * will return true, and false otherwise.
 146 */
 147bool intel_dp_is_edp(struct intel_dp *intel_dp)
 148{
 149	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 150
 151	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 152}
 153
 154static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 155{
 156	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 157}
 158
 159static void intel_dp_link_down(struct intel_encoder *encoder,
 160			       const struct intel_crtc_state *old_crtc_state);
 161static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
 162static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 163static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
 164					   const struct intel_crtc_state *crtc_state);
 165static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 166				      enum pipe pipe);
 167static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 
 168
 169/* update sink rates from dpcd */
 170static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
 171{
 172	static const int dp_rates[] = {
 173		162000, 270000, 540000, 810000
 174	};
 175	int i, max_rate;
 
 
 
 
 
 
 
 
 
 
 
 176
 177	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
 
 
 
 178
 179	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
 180		if (dp_rates[i] > max_rate)
 181			break;
 182		intel_dp->sink_rates[i] = dp_rates[i];
 183	}
 184
 185	intel_dp->num_sink_rates = i;
 186}
 187
 188/* Get length of rates array potentially limited by max_rate. */
 189static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
 190{
 191	int i;
 192
 193	/* Limit results by potentially reduced max rate */
 194	for (i = 0; i < len; i++) {
 195		if (rates[len - i - 1] <= max_rate)
 196			return len - i;
 197	}
 198
 199	return 0;
 200}
 201
 202/* Get length of common rates array potentially limited by max_rate. */
 203static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
 204					  int max_rate)
 205{
 206	return intel_dp_rate_limit_len(intel_dp->common_rates,
 207				       intel_dp->num_common_rates, max_rate);
 208}
 209
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is sorted ascending, so the last entry is the max. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
 215
 216/* Theoretical max between source and sink */
 217static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 218{
 219	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 220	int source_max = intel_dig_port->max_lanes;
 221	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 222	int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
 
 
 
 
 223
 224	return min3(source_max, sink_max, fia_max);
 225}
 226
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	/* Current cap, possibly reduced by link training fallback. */
	return intel_dp->max_link_lane_count;
}
 231
 232int
 233intel_dp_link_required(int pixel_clock, int bpp)
 234{
 235	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
 236	return DIV_ROUND_UP(pixel_clock * bpp, 8);
 237}
 238
 239int
 240intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 241{
 242	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
 243	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
 244	 * is transmitted every LS_Clk per lane, there is no need to account for
 245	 * the channel encoding that is done in the PHY layer here.
 246	 */
 247
 248	return max_link_clock * max_lanes;
 249}
 250
 251static int
 252intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
 253{
 254	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 255	struct intel_encoder *encoder = &intel_dig_port->base;
 256	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 257	int max_dotclk = dev_priv->max_dotclk_freq;
 258	int ds_max_dotclk;
 259
 260	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
 261
 262	if (type != DP_DS_PORT_TYPE_VGA)
 263		return max_dotclk;
 264
 265	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
 266						    intel_dp->downstream_ports);
 267
 268	if (ds_max_dotclk != 0)
 269		max_dotclk = min(max_dotclk, ds_max_dotclk);
 270
 271	return max_dotclk;
 272}
 273
/* Max source link rate (kHz) for a CNL port, based on SKU voltage and port. */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
 296
/* Max source link rate (kHz) for an ICL port. */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	/*
	 * External DP on a combo PHY (except on EHL) is limited to HBR2;
	 * all other cases get HBR3.
	 */
	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
 310
 
 
 
 
 
 
 
 
/*
 * One-time setup of intel_dp->source_rates/num_source_rates: pick the
 * platform's rate table, then truncate it by the platform- and
 * VBT-imposed max link rate (whichever is stricter).
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	/* Select the rate table for this platform generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the platform and VBT caps, taking the stricter one. */
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
 373
 374static int intersect_rates(const int *source_rates, int source_len,
 375			   const int *sink_rates, int sink_len,
 376			   int *common_rates)
 377{
 378	int i = 0, j = 0, k = 0;
 379
 380	while (i < source_len && j < sink_len) {
 381		if (source_rates[i] == sink_rates[j]) {
 382			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
 383				return k;
 384			common_rates[k] = source_rates[i];
 385			++k;
 386			++i;
 387			++j;
 388		} else if (source_rates[i] < sink_rates[j]) {
 389			++i;
 390		} else {
 391			++j;
 392		}
 393	}
 394	return k;
 395}
 396
 397/* return index of rate in rates array, or -1 if not found */
 398static int intel_dp_rate_index(const int *rates, int len, int rate)
 399{
 400	int i;
 401
 402	for (i = 0; i < len; i++)
 403		if (rate == rates[i])
 404			return i;
 405
 406	return -1;
 407}
 408
/* Recompute common_rates[] as the intersection of source and sink rates. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		/* Fall back to RBR, which every DP device must support. */
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
 425
 426static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
 427				       u8 lane_count)
 428{
 429	/*
 430	 * FIXME: we need to synchronize the current link parameters with
 431	 * hardware readout. Currently fast link training doesn't work on
 432	 * boot-up.
 433	 */
 434	if (link_rate == 0 ||
 435	    link_rate > intel_dp->max_link_rate)
 436		return false;
 437
 438	if (lane_count == 0 ||
 439	    lane_count > intel_dp_max_lane_count(intel_dp))
 440		return false;
 441
 442	return true;
 443}
 444
 445static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
 446						     int link_rate,
 447						     u8 lane_count)
 448{
 449	const struct drm_display_mode *fixed_mode =
 450		intel_dp->attached_connector->panel.fixed_mode;
 451	int mode_rate, max_rate;
 452
 453	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
 454	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
 455	if (mode_rate > max_rate)
 456		return false;
 457
 458	return true;
 459}
 460
/*
 * After failed link training, pick reduced parameters for the retry:
 * first step down the link rate at the same lane count, then halve the
 * lane count at the max common rate. Returns 0 on success (including the
 * eDP "retry with same parameters" case) and -1 when no fallback is left.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* A lower common rate exists: try it first. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Lowest rate exhausted: halve the lane count instead. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
 496
/* Scale @mode_clock by the FEC overhead factor (1/0.972261). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	/* 64-bit helpers: mode_clock * 1000000 can overflow 32 bits. */
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
 502
/*
 * Compute the max DSC compressed output bpp, in U6.4 fixed point, that
 * both the link bandwidth and the small joiner RAM allow, snapped down to
 * the nearest VESA-valid bpp. Returns 0 when even the smallest valid bpp
 * does not fit.
 */
static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
	DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
			      bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	/* Snap down to the largest valid bpp not exceeding the budget. */
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
 549
/*
 * Pick the smallest supported DSC slice count satisfying both the encoder
 * throughput limits and the sink's max slice width/count. Returns 0 when
 * no supported slice count works.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	/* Per-slice encoder throughput depends on the pixel rate regime. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count  <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}
 587
/*
 * Connector .mode_valid() hook: validate @mode against the eDP fixed mode
 * bounds, link bandwidth (including a possible DSC path), and the source/
 * downstream max dotclock.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP can't exceed the panel's fixed mode. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Bandwidth check assumes 18bpp (the lowest bpp used here). */
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	/* Too fast for the link, unless DSC can compress it down. */
	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
 660
 661u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
 
 
 662{
 663	int i;
 664	u32 v = 0;
 665
 666	if (src_bytes > 4)
 667		src_bytes = 4;
 668	for (i = 0; i < src_bytes; i++)
 669		v |= ((u32)src[i]) << ((3 - i) * 8);
 670	return v;
 671}
 
 672
 673static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
 674{
 675	int i;
 676	if (dst_bytes > 4)
 677		dst_bytes = 4;
 678	for (i = 0; i < dst_bytes; i++)
 679		dst[i] = src >> ((3-i) * 8);
 680}
 681
 682static void
 683intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
 684static void
 685intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 686					      bool force_disable_vdd);
 687static void
 688intel_dp_pps_init(struct intel_dp *intel_dp);
 689
/*
 * Take an AUX power domain reference and then the pps mutex, in that
 * order; returns the wakeref that must be handed back to pps_unlock().
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
 707
/*
 * Drop the pps mutex and the power reference taken by pps_lock().
 * Always returns 0, which terminates the with_pps_lock() loop.
 */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}
 719
/*
 * Run the loop body with the pps mutex held; pps_unlock() returns 0 so
 * the body executes once (assuming pps_lock() returns a non-zero wakeref).
 */
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
 722
/*
 * Make the power sequencer of intel_dp->pps_pipe lock onto this port by
 * briefly enabling and disabling the port with a minimal link config,
 * forcing the pipe's PLL on temporarily if needed.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
 795
/*
 * Find a pipe (A or B) whose power sequencer is claimed by no port;
 * returns INVALID_PIPE when both are taken.
 */
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			/* Non-eDP DP ports never own a PPS... */
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			/* ...but their active pipe's PPS is unusable. */
			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
 827
/*
 * Return the pipe whose power sequencer serves this eDP port, claiming
 * (and kicking) a free one first if none is assigned yet. Must be called
 * with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
 874
/*
 * Return the PPS instance index for this gen9-LP eDP port (the VBT
 * backlight controller index), reprogramming the PPS registers first if a
 * reset was flagged. Must be called with pps_mutex held.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}
 899
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Is @pipe's power sequencer reporting panel power on? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

/* Is VDD force enabled on @pipe's power sequencer? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe; last-resort pipe_check. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
 920
 921static enum pipe
 922vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
 923		     enum port port,
 924		     vlv_pipe_check pipe_check)
 925{
 926	enum pipe pipe;
 927
 928	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
 929		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
 930			PANEL_PORT_SELECT_MASK;
 931
 932		if (port_sel != PANEL_PORT_SELECT_VLV(port))
 933			continue;
 934
 935		if (!pipe_check(dev_priv, pipe))
 936			continue;
 937
 938		return pipe;
 939	}
 940
 941	return INVALID_PIPE;
 942}
 943
/*
 * At init, adopt the power sequencer the BIOS left associated with this
 * port, preferring one with the panel on, then one with VDD on, then any
 * with the matching port select. Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
 979
/*
 * Invalidate cached PPS state for all eDP ports (BXT: flag registers for
 * reprogramming; VLV/CHV: forget the pipe assignment). NOTE(review):
 * intended for when PPS HW state may have been lost — confirm call sites.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
1012
/* MMIO register set of one panel power sequencer instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;	/* INVALID_MMIO_REG where PP_DIVISOR is gone */
};
1020
/*
 * Fill @regs with the PPS registers for this port's sequencer instance
 * (gen9-LP: VBT backlight controller index; VLV/CHV: pipe; otherwise 0).
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}
1045
1046static i915_reg_t
1047_pp_ctrl_reg(struct intel_dp *intel_dp)
1048{
1049	struct pps_registers regs;
1050
1051	intel_pps_get_registers(intel_dp, &regs);
1052
1053	return regs.pp_ctrl;
1054}
 
 
 
1055
1056static i915_reg_t
1057_pp_stat_reg(struct intel_dp *intel_dp)
1058{
1059	struct pps_registers regs;
1060
1061	intel_pps_get_registers(intel_dp, &regs);
1062
1063	return regs.pp_stat;
1064}
1065
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/* Only eDP panels care, and only on restart. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
 
1099
/* Is panel power on, per the PPS status register? pps_mutex must be held. */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No PPS assigned yet on VLV/CHV: treat the panel as powered off. */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
1112
1113static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 
 
1114{
1115	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 
 
 
 
 
 
 
 
 
1116
1117	lockdep_assert_held(&dev_priv->pps_mutex);
 
1118
1119	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1120	    intel_dp->pps_pipe == INVALID_PIPE)
1121		return false;
1122
1123	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
1124}
 
1125
/* Warn if an AUX transfer is attempted on an eDP panel that is powered off. */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (edp_have_panel_power(intel_dp) || edp_have_panel_vdd(intel_dp))
		return;

	WARN(1, "eDP powered off while attempting aux channel communication.\n");
	DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
		      I915_READ(_pp_stat_reg(intel_dp)),
		      I915_READ(_pp_ctrl_reg(intel_dp)));
}
1141
/*
 * Wait (up to 10ms) for the AUX channel to finish the current transfer,
 * i.e. for DP_AUX_CH_CTL_SEND_BUSY to clear. Returns the final control
 * register value so the caller can inspect the done/error bits.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

/* C re-reads the control register on every wakeup; true once BUSY clears. */
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}
1163
1164static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1165{
1166	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1167
1168	if (index)
1169		return 0;
1170
1171	/*
1172	 * The clock divider is based off the hrawclk, and would like to run at
1173	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
1174	 */
1175	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1176}
1177
1178static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1179{
1180	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1181	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1182
1183	if (index)
1184		return 0;
1185
1186	/*
1187	 * The clock divider is based off the cdclk or PCH rawclk, and would
1188	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
1189	 * divide by 2000 and use that
1190	 */
1191	if (dig_port->aux_ch == AUX_CH_A)
1192		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
1193	else
1194		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1195}
1196
1197static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1198{
1199	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1200	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1201
1202	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1203		/* Workaround for non-ULT HSW */
1204		switch (index) {
1205		case 0: return 63;
1206		case 1: return 72;
1207		default: return 0;
1208		}
1209	}
 
 
1210
1211	return ilk_get_aux_clock_divider(intel_dp, index);
1212}
1213
1214static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1215{
1216	/*
1217	 * SKL doesn't need us to program the AUX clock divider (Hardware will
1218	 * derive the clock from CDCLK automatically). We still implement the
1219	 * get_aux_clock_divider vfunc to plug-in into the existing code.
1220	 */
1221	return index ? 0 : 1;
1222}
1223
1224static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1225				int send_bytes,
1226				u32 aux_clock_divider)
1227{
1228	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1229	struct drm_i915_private *dev_priv =
1230			to_i915(intel_dig_port->base.base.dev);
1231	u32 precharge, timeout;
1232
1233	if (IS_GEN(dev_priv, 6))
1234		precharge = 3;
1235	else
1236		precharge = 5;
1237
1238	if (IS_BROADWELL(dev_priv))
1239		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1240	else
1241		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1242
1243	return DP_AUX_CH_CTL_SEND_BUSY |
1244	       DP_AUX_CH_CTL_DONE |
1245	       DP_AUX_CH_CTL_INTERRUPT |
1246	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
1247	       timeout |
1248	       DP_AUX_CH_CTL_RECEIVE_ERROR |
1249	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1250	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1251	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1252}
1253
1254static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1255				int send_bytes,
1256				u32 unused)
1257{
1258	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1259	struct drm_i915_private *i915 =
1260			to_i915(intel_dig_port->base.base.dev);
1261	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
1262	u32 ret;
1263
1264	ret = DP_AUX_CH_CTL_SEND_BUSY |
1265	      DP_AUX_CH_CTL_DONE |
1266	      DP_AUX_CH_CTL_INTERRUPT |
1267	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
1268	      DP_AUX_CH_CTL_TIME_OUT_MAX |
1269	      DP_AUX_CH_CTL_RECEIVE_ERROR |
1270	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1271	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1272	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1273
1274	if (intel_phy_is_tc(i915, phy) &&
1275	    intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
1276		ret |= DP_AUX_CH_CTL_TBT_IO;
1277
1278	return ret;
1279}
1280
/*
 * Perform one raw AUX channel transfer: send @send_bytes bytes from @send,
 * receive up to @recv_size bytes into @recv. Returns the number of bytes
 * received, or a negative errno (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 *
 * Handles all the required bracketing: TC port lock, AUX power domain
 * reference, pps lock, eDP VDD, and a pm_qos request to keep IRQ latency
 * low for the duration of the transfer.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(intel_dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(intel_dig_port);
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(intel_dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		/* Rate-limit the WARN: only fire when the stuck value changes. */
		static u32 last_status = -1;
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry with each available clock divider until one succeeds. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo everything in the reverse order of acquisition. */
	pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(intel_dig_port);

	return ret;
}
1468
1469#define BARE_ADDRESS_SIZE	3
1470#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
1471
1472static void
1473intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1474		    const struct drm_dp_aux_msg *msg)
1475{
1476	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1477	txbuf[1] = (msg->address >> 8) & 0xff;
1478	txbuf[2] = msg->address & 0xff;
1479	txbuf[3] = msg->size - 1;
1480}
1481
/*
 * drm_dp_aux .transfer hook: translate a drm_dp_aux_msg into a raw AUX
 * transfer via intel_dp_aux_xfer(). Returns the payload size on success
 * or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size messages are address-only transactions. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* A buffer without a size (or vice versa) is inconsistent. */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			/* Reply code lives in the high nibble of byte 0. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply header. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1552
1553
1554static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1555{
1556	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1557	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1558	enum aux_ch aux_ch = dig_port->aux_ch;
1559
1560	switch (aux_ch) {
1561	case AUX_CH_B:
1562	case AUX_CH_C:
1563	case AUX_CH_D:
1564		return DP_AUX_CH_CTL(aux_ch);
1565	default:
1566		MISSING_CASE(aux_ch);
1567		return DP_AUX_CH_CTL(AUX_CH_B);
1568	}
1569}
1570
1571static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1572{
1573	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1574	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1575	enum aux_ch aux_ch = dig_port->aux_ch;
1576
1577	switch (aux_ch) {
1578	case AUX_CH_B:
1579	case AUX_CH_C:
1580	case AUX_CH_D:
1581		return DP_AUX_CH_DATA(aux_ch, index);
1582	default:
1583		MISSING_CASE(aux_ch);
1584		return DP_AUX_CH_DATA(AUX_CH_B, index);
1585	}
1586}
1587
1588static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1589{
1590	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1591	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1592	enum aux_ch aux_ch = dig_port->aux_ch;
1593
1594	switch (aux_ch) {
1595	case AUX_CH_A:
1596		return DP_AUX_CH_CTL(aux_ch);
1597	case AUX_CH_B:
1598	case AUX_CH_C:
1599	case AUX_CH_D:
1600		return PCH_DP_AUX_CH_CTL(aux_ch);
1601	default:
1602		MISSING_CASE(aux_ch);
1603		return DP_AUX_CH_CTL(AUX_CH_A);
1604	}
1605}
1606
1607static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1608{
1609	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1610	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1611	enum aux_ch aux_ch = dig_port->aux_ch;
1612
1613	switch (aux_ch) {
1614	case AUX_CH_A:
1615		return DP_AUX_CH_DATA(aux_ch, index);
1616	case AUX_CH_B:
1617	case AUX_CH_C:
1618	case AUX_CH_D:
1619		return PCH_DP_AUX_CH_DATA(aux_ch, index);
1620	default:
1621		MISSING_CASE(aux_ch);
1622		return DP_AUX_CH_DATA(AUX_CH_A, index);
1623	}
1624}
1625
1626static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1627{
1628	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1629	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1630	enum aux_ch aux_ch = dig_port->aux_ch;
1631
1632	switch (aux_ch) {
1633	case AUX_CH_A:
1634	case AUX_CH_B:
1635	case AUX_CH_C:
1636	case AUX_CH_D:
1637	case AUX_CH_E:
1638	case AUX_CH_F:
1639		return DP_AUX_CH_CTL(aux_ch);
1640	default:
1641		MISSING_CASE(aux_ch);
1642		return DP_AUX_CH_CTL(AUX_CH_A);
1643	}
1644}
1645
1646static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1647{
1648	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1649	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1650	enum aux_ch aux_ch = dig_port->aux_ch;
1651
1652	switch (aux_ch) {
1653	case AUX_CH_A:
1654	case AUX_CH_B:
1655	case AUX_CH_C:
1656	case AUX_CH_D:
1657	case AUX_CH_E:
1658	case AUX_CH_F:
1659		return DP_AUX_CH_DATA(aux_ch, index);
1660	default:
1661		MISSING_CASE(aux_ch);
1662		return DP_AUX_CH_DATA(AUX_CH_A, index);
1663	}
1664}
1665
/* Free resources allocated by intel_dp_aux_init() (the aux channel name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1671
1672static void
1673intel_dp_aux_init(struct intel_dp *intel_dp)
1674{
1675	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1676	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1677	struct intel_encoder *encoder = &dig_port->base;
1678
1679	if (INTEL_GEN(dev_priv) >= 9) {
1680		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1681		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1682	} else if (HAS_PCH_SPLIT(dev_priv)) {
1683		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1684		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1685	} else {
1686		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1687		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1688	}
1689
1690	if (INTEL_GEN(dev_priv) >= 9)
1691		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1692	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1693		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1694	else if (HAS_PCH_SPLIT(dev_priv))
1695		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1696	else
1697		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1698
1699	if (INTEL_GEN(dev_priv) >= 9)
1700		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1701	else
1702		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1703
1704	drm_dp_aux_init(&intel_dp->aux);
1705
1706	/* Failure to allocate our preferred name is not critical */
1707	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1708				       port_name(encoder->port));
1709	intel_dp->aux.transfer = intel_dp_aux_transfer;
1710}
1711
1712bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1713{
1714	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1715
1716	return max_rate >= 540000;
1717}
1718
1719bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1720{
1721	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1722
1723	return max_rate >= 810000;
1724}
1725
1726static void
1727intel_dp_set_clock(struct intel_encoder *encoder,
1728		   struct intel_crtc_state *pipe_config)
1729{
1730	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1731	const struct dp_link_dpll *divisor = NULL;
1732	int i, count = 0;
1733
1734	if (IS_G4X(dev_priv)) {
1735		divisor = g4x_dpll;
1736		count = ARRAY_SIZE(g4x_dpll);
1737	} else if (HAS_PCH_SPLIT(dev_priv)) {
1738		divisor = pch_dpll;
1739		count = ARRAY_SIZE(pch_dpll);
1740	} else if (IS_CHERRYVIEW(dev_priv)) {
1741		divisor = chv_dpll;
1742		count = ARRAY_SIZE(chv_dpll);
1743	} else if (IS_VALLEYVIEW(dev_priv)) {
1744		divisor = vlv_dpll;
1745		count = ARRAY_SIZE(vlv_dpll);
1746	}
1747
1748	if (divisor && count) {
1749		for (i = 0; i < count; i++) {
1750			if (pipe_config->port_clock == divisor[i].clock) {
1751				pipe_config->dpll = divisor[i].dpll;
1752				pipe_config->clock_set = true;
1753				break;
1754			}
1755		}
1756	}
1757}
1758
/*
 * snprintf_int_array - format an int array into @str as "a, b, c"
 * @str: destination buffer; always NUL-terminated on return (len > 0)
 * @len: size of @str in bytes
 * @array: values to format
 * @nelem: number of entries in @array
 *
 * Stops early on truncation or output error, leaving a valid (possibly
 * truncated) string in @str.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Check for error explicitly: previously a negative return
		 * was only caught via the implicit signed->unsigned
		 * conversion in the int vs size_t comparison.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1774
1775static void intel_dp_print_rates(struct intel_dp *intel_dp)
1776{
 
1777	char str[128]; /* FIXME: too big for stack? */
1778
1779	if ((drm_debug & DRM_UT_KMS) == 0)
1780		return;
1781
1782	snprintf_int_array(str, sizeof(str),
1783			   intel_dp->source_rates, intel_dp->num_source_rates);
1784	DRM_DEBUG_KMS("source rates: %s\n", str);
1785
1786	snprintf_int_array(str, sizeof(str),
1787			   intel_dp->sink_rates, intel_dp->num_sink_rates);
1788	DRM_DEBUG_KMS("sink rates: %s\n", str);
1789
1790	snprintf_int_array(str, sizeof(str),
1791			   intel_dp->common_rates, intel_dp->num_common_rates);
1792	DRM_DEBUG_KMS("common rates: %s\n", str);
1793}
1794
1795int
1796intel_dp_max_link_rate(struct intel_dp *intel_dp)
1797{
 
1798	int len;
1799
1800	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1801	if (WARN_ON(len <= 0))
1802		return 162000;
1803
1804	return intel_dp->common_rates[len - 1];
1805}
1806
1807int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1808{
 
1809	int i = intel_dp_rate_index(intel_dp->sink_rates,
1810				    intel_dp->num_sink_rates, rate);
1811
1812	if (WARN_ON(i < 0))
1813		i = 0;
1814
1815	return i;
1816}
1817
1818void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1819			   u8 *link_bw, u8 *rate_select)
1820{
1821	/* eDP 1.4 rate select method. */
1822	if (intel_dp->use_rate_select) {
1823		*link_bw = 0;
1824		*rate_select =
1825			intel_dp_rate_select(intel_dp, port_clock);
1826	} else {
1827		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1828		*rate_select = 0;
1829	}
1830}
1831
1832static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1833					 const struct intel_crtc_state *pipe_config)
1834{
1835	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1836
1837	return INTEL_GEN(dev_priv) >= 11 &&
1838		pipe_config->cpu_transcoder != TRANSCODER_A;
 
 
 
 
 
 
1839}
1840
1841static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1842				  const struct intel_crtc_state *pipe_config)
1843{
1844	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1845		drm_dp_sink_supports_fec(intel_dp->fec_capable);
1846}
1847
1848static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1849					 const struct intel_crtc_state *pipe_config)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1850{
1851	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 
 
1852
1853	return INTEL_GEN(dev_priv) >= 10 &&
1854		pipe_config->cpu_transcoder != TRANSCODER_A;
1855}
1856
1857static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1858				  const struct intel_crtc_state *pipe_config)
1859{
1860	if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
 
 
 
 
 
 
 
1861		return false;
1862
1863	return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1864		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
 
 
 
 
 
 
 
 
 
 
1865}
1866
1867static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1868				struct intel_crtc_state *pipe_config)
1869{
1870	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1871	struct intel_connector *intel_connector = intel_dp->attached_connector;
1872	int bpp, bpc;
1873
1874	bpp = pipe_config->pipe_bpp;
1875	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
 
 
1876
1877	if (bpc > 0)
1878		bpp = min(bpp, 3*bpc);
 
 
 
 
1879
 
1880	if (intel_dp_is_edp(intel_dp)) {
1881		/* Get bpp from vbt only for panels that dont have bpp in edid */
1882		if (intel_connector->base.display_info.bpc == 0 &&
1883		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1884			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1885				      dev_priv->vbt.edp.bpp);
 
1886			bpp = dev_priv->vbt.edp.bpp;
1887		}
1888	}
1889
1890	return bpp;
1891}
1892
1893/* Adjust link config limits based on compliance test requests. */
1894void
1895intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1896				  struct intel_crtc_state *pipe_config,
1897				  struct link_config_limits *limits)
1898{
 
 
1899	/* For DP Compliance we override the computed bpp for the pipe */
1900	if (intel_dp->compliance.test_data.bpc != 0) {
1901		int bpp = 3 * intel_dp->compliance.test_data.bpc;
1902
1903		limits->min_bpp = limits->max_bpp = bpp;
1904		pipe_config->dither_force_disable = bpp == 6 * 3;
1905
1906		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1907	}
1908
1909	/* Use values requested by Compliance Test Request */
1910	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1911		int index;
1912
1913		/* Validate the compliance test data since max values
1914		 * might have changed due to link train fallback.
1915		 */
1916		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1917					       intel_dp->compliance.test_lane_count)) {
1918			index = intel_dp_rate_index(intel_dp->common_rates,
1919						    intel_dp->num_common_rates,
1920						    intel_dp->compliance.test_link_rate);
1921			if (index >= 0)
1922				limits->min_clock = limits->max_clock = index;
1923			limits->min_lane_count = limits->max_lane_count =
1924				intel_dp->compliance.test_lane_count;
1925		}
1926	}
1927}
1928
1929static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
1930{
1931	/*
1932	 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
1933	 * format of the number of bytes per pixel will be half the number
1934	 * of bytes of RGB pixel.
1935	 */
1936	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1937		bpp /= 2;
1938
1939	return bpp;
1940}
1941
/*
 * Optimize link config in order: max bpp, min clock, min lanes.
 * Walks bpp downwards from the limit (in 2-bpc steps), then link clock and
 * lane count upwards, and takes the first combination whose bandwidth fits
 * the mode. Returns 0 on success, -EINVAL if nothing fits.
 */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		/* YCbCr 4:2:0 halves the effective bpp on the link. */
		int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
1979
1980static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1981{
 
1982	int i, num_bpc;
1983	u8 dsc_bpc[3] = {0};
 
 
 
 
 
 
 
1984
1985	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1986						       dsc_bpc);
1987	for (i = 0; i < num_bpc; i++) {
1988		if (dsc_max_bpc >= dsc_bpc[i])
1989			return dsc_bpc[i] * 3;
1990	}
1991
1992	return 0;
1993}
1994
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Compute a DSC-enabled link configuration: choose the pipe bpp, compressed
 * bpp and slice count, using the max link rate and lane count from @limits,
 * and decide whether FEC and VDSC engine splitting are required. Returns 0
 * on success or a negative errno when DSC cannot be used for this config.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is required for DSC on DP, but not on eDP. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP sinks report the compressed bpp directly in the DPCD. */
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}
2092
2093int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2094{
2095	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2096		return 6 * 3;
2097	else
2098		return 8 * 3;
2099}
2100
2101static int
2102intel_dp_compute_link_config(struct intel_encoder *encoder,
2103			     struct intel_crtc_state *pipe_config,
2104			     struct drm_connector_state *conn_state)
2105{
2106	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2107	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
 
2108	struct link_config_limits limits;
2109	int common_len;
2110	int ret;
2111
2112	common_len = intel_dp_common_len_rate_limit(intel_dp,
2113						    intel_dp->max_link_rate);
2114
2115	/* No common link rates between source and sink */
2116	WARN_ON(common_len <= 0);
2117
2118	limits.min_clock = 0;
2119	limits.max_clock = common_len - 1;
2120
2121	limits.min_lane_count = 1;
2122	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2123
2124	limits.min_bpp = intel_dp_min_bpp(pipe_config);
2125	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2126
2127	if (intel_dp_is_edp(intel_dp)) {
2128		/*
2129		 * Use the maximum clock and number of lanes the eDP panel
2130		 * advertizes being capable of. The panels are generally
 
2131		 * designed to support only a single clock and lane
2132		 * configuration, and typically these values correspond to the
2133		 * native resolution of the panel.
2134		 */
2135		limits.min_lane_count = limits.max_lane_count;
2136		limits.min_clock = limits.max_clock;
2137	}
2138
2139	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2140
2141	DRM_DEBUG_KMS("DP link computation with max lane count %i "
2142		      "max rate %d max bpp %d pixel clock %iKHz\n",
2143		      limits.max_lane_count,
2144		      intel_dp->common_rates[limits.max_clock],
2145		      limits.max_bpp, adjusted_mode->crtc_clock);
 
 
 
 
 
2146
2147	/*
2148	 * Optimize for slow and wide. This is the place to add alternative
2149	 * optimization policy.
2150	 */
2151	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2152
2153	/* enable compression if the mode doesn't fit available BW */
2154	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2155	if (ret || intel_dp->force_dsc_en) {
 
 
 
 
2156		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2157						  conn_state, &limits);
2158		if (ret < 0)
2159			return ret;
2160	}
2161
2162	if (pipe_config->dsc_params.compression_enable) {
2163		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2164			      pipe_config->lane_count, pipe_config->port_clock,
2165			      pipe_config->pipe_bpp,
2166			      pipe_config->dsc_params.compressed_bpp);
2167
2168		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2169			      intel_dp_link_required(adjusted_mode->crtc_clock,
2170						     pipe_config->dsc_params.compressed_bpp),
2171			      intel_dp_max_data_rate(pipe_config->port_clock,
2172						     pipe_config->lane_count));
 
 
2173	} else {
2174		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2175			      pipe_config->lane_count, pipe_config->port_clock,
2176			      pipe_config->pipe_bpp);
2177
2178		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2179			      intel_dp_link_required(adjusted_mode->crtc_clock,
2180						     pipe_config->pipe_bpp),
2181			      intel_dp_max_data_rate(pipe_config->port_clock,
2182						     pipe_config->lane_count));
2183	}
2184	return 0;
2185}
2186
/*
 * Switch the pipe to YCbCr 4:2:0 output when the mode supports nothing else
 * and both the sink (VSC SDP colorimetry) and the connector allow it.
 * Returns 0 on success (including the no-op case) or a negative error code
 * if the required pipe scaler could not be allocated.
 */
static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct drm_connector *connector,
			 struct intel_crtc_state *crtc_state)
{
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	int ret;

	/* Stay with the default (RGB) output format unless 4:2:0 is mandatory
	 * for this mode and actually usable on this sink/connector. */
	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	/* YCBCR 420 output conversion needs a scaler */
	ret = skl_update_scaler_crtc(crtc_state);
	if (ret) {
		DRM_DEBUG_KMS("Scaler allocation for output failed\n");
		return ret;
	}

	intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);

	return 0;
}
2216
2217bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2218				  const struct drm_connector_state *conn_state)
2219{
2220	const struct intel_digital_connector_state *intel_conn_state =
2221		to_intel_digital_connector_state(conn_state);
2222	const struct drm_display_mode *adjusted_mode =
2223		&crtc_state->base.adjusted_mode;
 
 
 
 
 
 
 
 
 
 
2224
2225	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2226		/*
2227		 * See:
2228		 * CEA-861-E - 5.1 Default Encoding Parameters
2229		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2230		 */
2231		return crtc_state->pipe_bpp != 18 &&
2232			drm_default_rgb_quant_range(adjusted_mode) ==
2233			HDMI_QUANTIZATION_RANGE_LIMITED;
2234	} else {
2235		return intel_conn_state->broadcast_rgb ==
2236			INTEL_BROADCAST_RGB_LIMITED;
2237	}
2238}
2239
/*
 * Top-level atomic compute hook for DP/eDP encoders: validates the adjusted
 * mode, picks output format, audio, link configuration, color range and
 * m/n values, and fills the rest of @pipe_config.
 * Returns 0 on success or a negative error code for unsupported modes or
 * when no valid link configuration exists.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	/* Some sinks require a fixed N value in the m/n computation (quirk). */
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	/* Default to RGB; LSPCON or the sink may switch us to YCbCr 4:2:0. */
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
					       pipe_config);

	if (ret)
		return ret;

	pipe_config->has_drrs = false;
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	/* eDP: force the panel's fixed mode and set up the panel fitter. */
	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

	/* Reject mode flags this hardware can't generate on DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* With DSC the link carries the compressed bpp, not the pipe bpp. */
	if (pipe_config->dsc_params.compression_enable)
		output_bpp = pipe_config->dsc_params.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	/* Second set of m/n values for seamless DRRS downclocking. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(output_bpp,
					       pipe_config->lane_count,
					       intel_connector->panel.downclock_mode->clock,
					       pipe_config->port_clock,
					       &pipe_config->dp_m2_n2,
					       constant_n, pipe_config->fec_enable);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);

	return 0;
}
2344
2345void intel_dp_set_link_params(struct intel_dp *intel_dp,
2346			      int link_rate, u8 lane_count,
2347			      bool link_mst)
2348{
2349	intel_dp->link_trained = false;
2350	intel_dp->link_rate = link_rate;
2351	intel_dp->lane_count = lane_count;
2352	intel_dp->link_mst = link_mst;
2353}
2354
/*
 * Program the cached DP port register value (intel_dp->DP) for the chosen
 * pipe config, handling the per-platform register layout differences
 * (IVB CPU port A, CPT PCH, and the legacy IBX/CPU/CHV layout).
 * Only the software copy plus TRANS_DP_CTL are written here; the port
 * register itself is written later in the enable sequence.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		/* On CPT, enhanced framing lives in TRANS_DP_CTL instead. */
		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2438
/*
 * Mask/value pairs matched against PP_STATUS by wait_panel_status() to
 * detect the panel power sequencer reaching the on, off and power-cycle
 * idle states. The literal 0 columns keep the three definitions visually
 * aligned per status field.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
2449
/*
 * Poll PP_STATUS until (status & mask) == value, or 5 seconds elapse.
 * A timeout is logged as an error but otherwise not propagated.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
 
2477
/* Wait for the PPS to report the panel fully powered on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

/* Wait for the PPS to report the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

/*
 * Enforce the panel's minimum power-off time (T11+T12) before powering it
 * back on, then wait for the PPS cycle-idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

/* Honor the panel's power-on to backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

/* Honor the panel's backlight-off to power-down delay (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
2522
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	/* Caller must hold pps_mutex. */
	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/*
	 * On non-DDI platforms the register should already carry the unlock
	 * key; warn if it doesn't and patch it in so subsequent writes stick.
	 */
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
2542
2543/*
2544 * Must be paired with edp_panel_vdd_off().
2545 * Must hold pps_mutex around the whole on/off sequence.
2546 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2547 */
2548static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2549{
 
2550	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2551	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2552	u32 pp;
2553	i915_reg_t pp_stat_reg, pp_ctrl_reg;
2554	bool need_to_disable = !intel_dp->want_panel_vdd;
2555
2556	lockdep_assert_held(&dev_priv->pps_mutex);
 
2557
2558	if (!intel_dp_is_edp(intel_dp))
2559		return false;
2560
2561	cancel_delayed_work(&intel_dp->panel_vdd_work);
2562	intel_dp->want_panel_vdd = true;
2563
2564	if (edp_have_panel_vdd(intel_dp))
2565		return need_to_disable;
2566
2567	intel_display_power_get(dev_priv,
2568				intel_aux_power_domain(intel_dig_port));
2569
2570	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2571		      port_name(intel_dig_port->base.port));
2572
2573	if (!edp_have_panel_power(intel_dp))
2574		wait_panel_power_cycle(intel_dp);
2575
2576	pp = ironlake_get_pp_control(intel_dp);
2577	pp |= EDP_FORCE_VDD;
2578
2579	pp_stat_reg = _pp_stat_reg(intel_dp);
2580	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2581
2582	I915_WRITE(pp_ctrl_reg, pp);
2583	POSTING_READ(pp_ctrl_reg);
2584	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2585			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2586	/*
2587	 * If the panel wasn't on, delay before accessing aux channel
2588	 */
2589	if (!edp_have_panel_power(intel_dp)) {
2590		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2591			      port_name(intel_dig_port->base.port));
2592		msleep(intel_dp->panel_power_up_delay);
2593	}
2594
2595	return need_to_disable;
 
2596}
2597
2598/*
2599 * Must be paired with intel_edp_panel_vdd_off() or
2600 * intel_edp_panel_off().
2601 * Nested calls to these functions are not allowed since
2602 * we drop the lock. Caller must use some higher level
2603 * locking to prevent nested calls from other threads.
2604 */
2605void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2606{
2607	intel_wakeref_t wakeref;
2608	bool vdd;
2609
2610	if (!intel_dp_is_edp(intel_dp))
2611		return;
2612
2613	vdd = false;
2614	with_pps_lock(intel_dp, wakeref)
2615		vdd = edp_panel_vdd_on(intel_dp);
2616	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2617	     port_name(dp_to_dig_port(intel_dp)->base.port));
2618}
2619
/*
 * Immediately drop the VDD force override and release the AUX power domain
 * reference taken by edp_panel_vdd_on(). Caller must hold pps_mutex and
 * must have cleared want_panel_vdd first.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* If VDD was the only thing keeping the panel powered, the T11/T12
	 * power-cycle window starts now. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(intel_dig_port));
}
2657
/*
 * Deferred-work handler that turns VDD off, unless someone re-requested
 * VDD (want_panel_vdd) between scheduling and execution.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}
2670
/* Schedule the deferred VDD-off work instead of dropping VDD right away. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2683
2684/*
2685 * Must be paired with edp_panel_vdd_on().
2686 * Must hold pps_mutex around the whole on/off sequence.
2687 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2688 */
2689static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2690{
2691	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2692
2693	lockdep_assert_held(&dev_priv->pps_mutex);
2694
2695	if (!intel_dp_is_edp(intel_dp))
2696		return;
 
2697
2698	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2699	     port_name(dp_to_dig_port(intel_dp)->base.port));
 
2700
2701	intel_dp->want_panel_vdd = false;
2702
2703	if (sync)
2704		edp_panel_vdd_off_sync(intel_dp);
2705	else
2706		edp_panel_vdd_schedule_off(intel_dp);
2707}
2708
/*
 * Turn the eDP panel power on through the PPS and wait for the sequencer
 * to report the panel on. Applies the ILK reset-disable workaround around
 * the power-on. Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->base.port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->base.port)))
		return;

	/* Respect the panel's minimum off time before re-powering. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used for the T8 backlight-on delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2755
/* Locked wrapper around edp_panel_on(); no-op for non-eDP ports. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}
2766
 
 
2767
/*
 * Turn the eDP panel power off through the PPS, dropping panel power,
 * VDD force and backlight enable in one register write, then wait for
 * the sequencer to finish. Releases the AUX power domain reference that
 * was taken when VDD was enabled. Caller must hold pps_mutex and must
 * still hold a VDD request (see the WARN below).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dig_port->base.port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Start of the T11/T12 power-cycle window. */
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
2805
/* Locked wrapper around edp_panel_off(); no-op for non-eDP ports. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}
2816
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		/* Set only the backlight-enable bit in PP_CONTROL. */
		pp = ironlake_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2842
/* Enable backlight PWM and backlight PP control. */
static void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the PPS backlight-enable bit. */
	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}
2857
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		/* Clear only the backlight-enable bit in PP_CONTROL. */
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	/* Record the off timestamp and honor the T9 delay before the panel
	 * may be powered down. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
2881
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PPS backlight-enable bit first, then the PWM (reverse of enable). */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}
2895
2896/*
2897 * Hook for controlling the panel power control backlight through the bl_power
2898 * sysfs attribute. Take care to handle multiple calls.
2899 */
2900static void intel_edp_backlight_power(struct intel_connector *connector,
2901				      bool enable)
2902{
2903	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2904	intel_wakeref_t wakeref;
2905	bool is_enabled;
2906
2907	is_enabled = false;
2908	with_pps_lock(intel_dp, wakeref)
2909		is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2910	if (is_enabled == enable)
2911		return;
2912
2913	DRM_DEBUG_KMS("panel power control backlight %s\n",
2914		      enable ? "enable" : "disable");
2915
2916	if (enable)
2917		_intel_edp_backlight_on(intel_dp);
2918	else
2919		_intel_edp_backlight_off(intel_dp);
2920}
2921
/* Assert that the DP port enable bit matches the expected @state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->base.port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2934
/* Assert that the eDP PLL enable bit (in DP_A) matches the expected @state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2945
/*
 * Enable the CPU eDP PLL (DP_A) at the frequency required by the port
 * clock. Must be called with the pipe and DP port still disabled and the
 * PLL currently off (asserted below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	/* Program the PLL frequency before enabling it. */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2985
/*
 * Disable the CPU eDP PLL (DP_A). Must be called with the pipe and DP
 * port already disabled and the PLL currently on (asserted below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
3004
3005static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3006{
3007	/*
3008	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3009	 * be capable of signalling downstream hpd with a long pulse.
3010	 * Whether or not that means D3 is safe to use is not clear,
3011	 * but let's assume so until proven otherwise.
3012	 *
3013	 * FIXME should really check all downstream ports...
3014	 */
3015	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3016		intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
3017		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3018}
3019
/*
 * Enable or disable DSC decompression on the sink via the DP_DSC_ENABLE
 * DPCD register. No-op unless the crtc state has DSC enabled; a failed
 * AUX write is only logged, not propagated.
 */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	int ret;

	if (!crtc_state->dsc_params.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
			      enable ? "enable" : "disable");
}
3035
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep the sink in D0 if D3 would break downstream HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1). */
	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
3074
3075static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3076				 enum port port, enum pipe *pipe)
3077{
3078	enum pipe p;
3079
3080	for_each_pipe(dev_priv, p) {
3081		u32 val = I915_READ(TRANS_DP_CTL(p));
3082
3083		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3084			*pipe = p;
3085			return true;
3086		}
3087	}
3088
3089	DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
3090
3091	/* must initialize pipe to something for the asserts */
3092	*pipe = PIPE_A;
3093
3094	return false;
3095}
3096
/*
 * Report whether the DP port behind @dp_reg is enabled and decode which
 * pipe it is (or was last) driving. *pipe is always filled in - even for
 * a disabled port - because the state asserts want to know it.
 */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = I915_READ(dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		/* CPT keeps the mapping in the transcoder registers instead. */
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}
3120
3121static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3122				  enum pipe *pipe)
3123{
3124	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3125	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3126	intel_wakeref_t wakeref;
3127	bool ret;
3128
3129	wakeref = intel_display_power_get_if_enabled(dev_priv,
3130						     encoder->power_domain);
3131	if (!wakeref)
 
 
 
 
 
3132		return false;
3133
3134	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3135				    encoder->port, pipe);
3136
3137	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3138
3139	return ret;
3140}
3141
/*
 * Read back the current hardware state of an enabled DP encoder into
 * @pipe_config: output type, audio enable, sync polarities, color range,
 * lane count, M/N values, port clock and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/*
	 * On CPT the sync polarities live in the transcoder register;
	 * elsewhere they are in the port register itself.
	 */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	/* Lane count is stored as (lanes - 1) in the port register. */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A (CPU eDP) runs off a fixed 162/270 MHz eDP PLL. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
3225
/*
 * Common DP disable sequence: audio off first, then the eDP
 * backlight/panel power-down with the sink put into D3 in between.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Any previously trained link is now stale. */
	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
3245
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	/* g4x needs nothing beyond the common DP disable sequence. */
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3252
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	/* VLV needs nothing beyond the common DP disable sequence. */
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3259
/* Post-pipe-disable work for g4x: take the link down, then the eDP PLL. */
static void g4x_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
 
3279
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	/* VLV only needs the link taken down after the pipe is off. */
	intel_dp_link_down(encoder, old_crtc_state);
}
3286
/* CHV: take the link down, then reset the PHY data lanes under the dpio lock. */
static void chv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}
3302
/*
 * Translate the DP-spec training pattern in @dp_train_pat into the
 * platform-specific encoding: DDI platforms program DP_TP_CTL directly,
 * all others update the port register value through *DP (with separate
 * CPT and g4x bit encodings). The non-DDI paths only modify *DP; the
 * caller is responsible for writing it out.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 u32 *DP,
			 u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & train_pat_mask);

	if (HAS_DDI(dev_priv)) {
		u32 temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & train_pat_mask) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		case DP_TRAINING_PATTERN_4:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		/* CPT-style encoding; TPS3/TPS4 are not available here. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* g4x-style encoding; TPS3/TPS4 are not available here either. */
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}
}
3386
/*
 * Enable the DP port with training pattern 1 pre-selected, using the
 * two-step register write that VLV/CHV power sequencers require.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3409
/*
 * Common (e)DP enable path: enable the port (under pps_lock with the
 * panel power sequence), wait for the VLV/CHV PHY, wake the sink, train
 * the link, then enable audio if needed.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	u32 dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	/* The port must not already be enabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		/* eDP panel power-on; vdd is only needed transiently. */
		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
3455
static void g4x_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	/* Full enable sequence, then the (e)DP backlight. */
	intel_enable_dp(encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
3463
static void vlv_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	/*
	 * On VLV/CHV intel_enable_dp() already ran from the pre_enable
	 * hook (see vlv_pre_enable_dp()); only the backlight remains.
	 */
	intel_edp_backlight_on(pipe_config, conn_state);
}
3470
/* Prepare the port registers; on port A (ilk+ eDP) also start the eDP PLL. */
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp, pipe_config);
}
3484
/*
 * Logically disconnect the power sequencer currently assigned to this
 * (e)DP port: sync off any vdd it holds, clear its port select, and mark
 * intel_dp as having no power sequencer.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	/* Must not be called while the port is still driving a pipe. */
	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	/* Only pipe A/B power sequencers exist on VLV/CHV. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3515
/*
 * Take the @pipe power sequencer away from whichever (e)DP port currently
 * owns it, so the caller can claim it. Caller holds pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
		enum port port = encoder->port;

		/* Stealing from a port that is actively using it is a bug. */
		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3541
/*
 * Bind the power sequencer of the crtc's pipe to this port: detach any
 * previously-used sequencer, steal the target one from other ports, and
 * (for eDP only) initialize it for this pipe/port. Caller holds pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/* We may be stealing the power sequencer from another port. */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Only eDP actually needs a power sequencer bound. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3584
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	/* VLV: PHY lane setup first, then the common DP enable sequence. */
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);
}
3593
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	/* Set up the port registers before the VLV PHY PLL comes up. */
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3602
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	/* CHV: PHY lane setup first, then the common DP enable sequence. */
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
3614
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	/* Set up the port registers before the CHV PHY PLL comes up. */
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
3623
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	/* Tear down the CHV PHY state after the PLL has been disabled. */
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3630
3631/*
3632 * Fetch AUX CH registers 0x202 - 0x207 which contain
3633 * link status information
3634 */
3635bool
3636intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3637{
3638	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3639				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3640}
3641
/* These are source-specific values. */

/*
 * Maximum voltage-swing level the source can drive on this port.
 * The platform checks are ordered from most to least specific.
 */
u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv))
		return intel_ddi_dp_voltage_max(encoder);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3661
/*
 * Maximum pre-emphasis level the source supports for the given voltage
 * swing, per platform. Higher swings generally allow less pre-emphasis.
 */
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv)) {
		return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3707
/*
 * Translate the requested vswing/pre-emphasis (train_set[0]) into VLV PHY
 * demph/preemph/uniqtranscale register values and program them. Returns 0
 * in all cases (including unsupported combinations, which are ignored)
 * since the levels go to the PHY, not the port register.
 */
static u32 vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3793
/*
 * Translate the requested vswing/pre-emphasis (train_set[0]) into CHV PHY
 * de-emphasis/margin values and program them. Returns 0 in all cases
 * (including unsupported combinations, which are ignored) since the
 * levels go to the PHY, not the port register.
 */
static u32 chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* Highest swing needs the unique transition scale. */
			uniq_trans_scale = true;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3876
3877static u32
3878g4x_signal_levels(u8 train_set)
 
 
3879{
3880	u32 signal_levels = 0;
3881
3882	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3883	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3884	default:
3885		signal_levels |= DP_VOLTAGE_0_4;
3886		break;
3887	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3888		signal_levels |= DP_VOLTAGE_0_6;
3889		break;
3890	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3891		signal_levels |= DP_VOLTAGE_0_8;
3892		break;
3893	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3894		signal_levels |= DP_VOLTAGE_1_2;
3895		break;
3896	}
3897	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3898	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3899	default:
3900		signal_levels |= DP_PRE_EMPHASIS_0;
3901		break;
3902	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3903		signal_levels |= DP_PRE_EMPHASIS_3_5;
3904		break;
3905	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3906		signal_levels |= DP_PRE_EMPHASIS_6;
3907		break;
3908	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3909		signal_levels |= DP_PRE_EMPHASIS_9_5;
3910		break;
3911	}
3912	return signal_levels;
3913}
3914
3915/* SNB CPU eDP voltage swing and pre-emphasis control */
3916static u32
3917snb_cpu_edp_signal_levels(u8 train_set)
3918{
3919	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3920					 DP_TRAIN_PRE_EMPHASIS_MASK);
3921	switch (signal_levels) {
3922	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3923	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3924		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3925	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3926		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3927	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3928	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3929		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3930	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3931	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3932		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3933	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3934	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3935		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3936	default:
3937		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3938			      "0x%x\n", signal_levels);
3939		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3940	}
3941}
3942
3943/* IVB CPU eDP voltage swing and pre-emphasis control */
3944static u32
3945ivb_cpu_edp_signal_levels(u8 train_set)
3946{
3947	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3948					 DP_TRAIN_PRE_EMPHASIS_MASK);
3949	switch (signal_levels) {
3950	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3951		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3952	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3953		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3954	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3955		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3956
3957	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3958		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3959	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3960		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3961
3962	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3963		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3964	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3965		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3966
3967	default:
3968		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3969			      "0x%x\n", signal_levels);
3970		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3971	}
3972}
3973
/*
 * Program the source-side vswing/pre-emphasis corresponding to
 * intel_dp->train_set[0], using the platform-appropriate encoding, then
 * latch the updated value into the port register. On VLV/CHV/BXT+ the
 * levels are written to the PHY by the helpers and mask stays 0.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u32 signal_levels, mask = 0;
	u8 train_set = intel_dp->train_set[0];

	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		signal_levels = bxt_signal_levels(intel_dp);
	} else if (HAS_DDI(dev_priv)) {
		signal_levels = ddi_signal_levels(intel_dp);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		signal_levels = ivb_cpu_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
		signal_levels = snb_cpu_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = g4x_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
4017
/*
 * Update the link-training pattern bits (in intel_dp->DP, or DP_TP_CTL
 * on DDI platforms) and write the port register out to the hardware.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       u8 dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
4031
/*
 * Switch a DDI port to transmitting idle patterns after link training.
 * No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u32 val;

	if (!HAS_DDI(dev_priv))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms timeout for the hardware to report idle-pattern completion. */
	if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
				  DP_TP_STATUS_IDLE_DONE, 1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
4061
/*
 * Disable the DP port: drop the link into the idle training pattern,
 * then clear the port enable (and audio) bits. Also applies the IBX
 * transcoder-A workaround and waits out the panel power-down delay.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First put the link into the idle training pattern... */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* ...then disable the port and any audio output on it. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		/* Immediately disable again, now on transcoder A. */
		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	/* Keep the cached register value in sync with the hardware. */
	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
4132
/*
 * DP 1.3+ sinks can expose their true capabilities at DPCD 2200h while
 * reporting a lower revision at 0000h for compatibility. When the sink
 * flags the extended field as present and it reports a revision at least
 * as new as the base field, overwrite the start of the cached DPCD with
 * the extended values.
 */
static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
	u8 dpcd_ext[6];

	/*
	 * Prior to DP1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * if it is set DP_DPCD_REV at 0000h could be at a value less than
	 * the true capability of the panel. The only way to check is to
	 * then compare 0000h and 2200h.
	 */
	if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
			     &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
		DRM_ERROR("DPCD failed read at extended capabilities\n");
		return;
	}

	if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
		return;
	}

	/* Nothing to do if base and extended fields already agree. */
	if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return;

	DRM_DEBUG_KMS("Base DPCD: %*ph\n",
		      (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);

	memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}
4168
/*
 * Read the base receiver capability field (and, when advertised, the
 * extended one at 2200h) into intel_dp->dpcd. Returns false if the AUX
 * transfer failed or the sink reports DPCD revision 0.
 */
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	intel_dp_extended_receiver_capabilities(intel_dp);

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
4182
4183bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4184{
4185	u8 dprx = 0;
4186
4187	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4188			      &dprx) != 1)
4189		return false;
4190	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4191}
4192
/*
 * Cache the sink's DSC capabilities (and, for external DP sinks, its FEC
 * capability) in intel_dp. Sinks without DSC support end up with both
 * caches zeroed.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			DRM_ERROR("Failed to read DPCD register 0x%x\n",
				  DP_DSC_SUPPORT);

		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
			      (int)sizeof(intel_dp->dsc_dpcd),
			      intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			DRM_ERROR("Failed to read FEC DPCD register\n");

		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
	}
}
4226
/*
 * One-time DPCD initialization for eDP panels: read the receiver caps,
 * the eDP display control registers, the PSR caps and the eDP 1.4+ link
 * rate table, then seed the source/sink common rates. Returns false if
 * the base DPCD could not be read.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated when not full. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
4305
 
 
 
 
 
 
 
 
 
 
4306
/*
 * (Re-)read the sink's DPCD, refresh the cached link rates for external
 * sinks, update the cached sink count and fetch the downstream port info
 * for branch devices. Returns false when a full re-detection is needed
 * (read failure, or SINK_COUNT == 0 behind a dongle).
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	/*
	 * Some eDP panels do not set a valid value for the sink count,
	 * which is why we don't read it for eDP, neither here nor in
	 * intel_edp_init_dpcd().
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
		u8 count;
		ssize_t r;

		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
4369
4370static bool
4371intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4372{
4373	u8 mstm_cap;
4374
4375	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4376		return false;
4377
4378	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4379		return false;
4380
4381	return mstm_cap & DP_MST_CAP;
4382}
4383
4384static bool
4385intel_dp_can_mst(struct intel_dp *intel_dp)
4386{
4387	return i915_modparams.enable_dp_mst &&
4388		intel_dp->can_mst &&
4389		intel_dp_sink_can_mst(intel_dp);
4390}
4391
/*
 * Decide whether to run this port in MST mode (source support, sink
 * support and the enable_dp_mst modparam must all agree) and program the
 * MST topology manager accordingly.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
		      port_name(encoder->port), yesno(intel_dp->can_mst),
		      yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));

	/* Source port has no MST support: leave the topology manager alone. */
	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915_modparams.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
4412
4413static bool
4414intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4415{
4416	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4417				sink_irq_vector, DP_DPRX_ESI_LEN) ==
4418		DP_DPRX_ESI_LEN;
4419}
4420
/*
 * Build and transmit the VSC SDP that signals YCbCr 4:2:0 pixel encoding
 * to the sink, per DP 1.4a spec Tables 2-119 (header) and 2-120 (data).
 */
static void
intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct dp_sdp vsc_sdp = {};

	/* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
	vsc_sdp.sdp_header.HB0 = 0;
	vsc_sdp.sdp_header.HB1 = 0x7;

	/*
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc_sdp.sdp_header.HB2 = 0x5;

	/*
	 * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/
	 * Colorimetry Format indication (HB2 = 05h).
	 */
	vsc_sdp.sdp_header.HB3 = 0x13;

	/*
	 * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h
	 * DB16[3:0] DP 1.4a spec, Table 2-120
	 */
	vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/
	/* RGB->YCBCR color conversion uses the BT.709 color space. */
	vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */

	/*
	 * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
	 * the following Component Bit Depth values are defined:
	 * 001b = 8bpc.
	 * 010b = 10bpc.
	 * 011b = 12bpc.
	 * 100b = 16bpc.
	 */
	switch (crtc_state->pipe_bpp) {
	case 24: /* 8bpc */
		vsc_sdp.db[17] = 0x1;
		break;
	case 30: /* 10bpc */
		vsc_sdp.db[17] = 0x2;
		break;
	case 36: /* 12bpc */
		vsc_sdp.db[17] = 0x3;
		break;
	case 48: /* 16bpc */
		vsc_sdp.db[17] = 0x4;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	/*
	 * Dynamic Range (Bit 7)
	 * 0 = VESA range, 1 = CTA range.
	 * all YCbCr are always limited range
	 */
	vsc_sdp.db[17] |= 0x80;

	/*
	 * Content Type (Bits 2:0)
	 * 000b = Not defined.
	 * 001b = Graphics.
	 * 010b = Photo.
	 * 011b = Video.
	 * 100b = Game
	 * All other values are RESERVED.
	 * Note: See CTA-861-G for the definition and expected
	 * processing by a stream sink for the above content types.
	 */
	vsc_sdp.db[18] = 0;

	intel_dig_port->write_infoframe(&intel_dig_port->base,
			crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
}
4501
4502void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
4503			       const struct intel_crtc_state *crtc_state)
 
4504{
4505	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
4506		return;
4507
4508	intel_pixel_encoding_setup_vsc(intel_dp, crtc_state);
 
 
 
 
 
 
 
 
 
 
 
 
4509}
4510
/*
 * Handle a LINK_TRAINING automated test request (DP CTS 1.2, 4.3.1.11):
 * read and validate the requested lane count and link rate, caching them
 * for the compliance modeset. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4547
/*
 * Handle a TEST_PATTERN automated test request (DP CTS 3.1.5). Only the
 * color-ramp pattern in RGB/VESA range at 6 or 8 bpc is supported; on
 * success the requested pattern, geometry and bpc are cached and the
 * test_active flag is set. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB in VESA (non-CEA) range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
4608
/*
 * Handle an EDID-read automated test request (DP CTS 1.2 Core r1.1,
 * 4.2.2.3-4.2.2.6): on a good read, write the checksum of the last EDID
 * block back to the sink and request the preferred resolution; otherwise
 * fall back to the failsafe resolution.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4652
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4653static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4654{
4655	u8 test_result = DP_TEST_NAK;
4656	return test_result;
 
 
 
 
 
 
 
 
 
 
4657}
4658
/*
 * Dispatch an automated test request read from DP_TEST_REQUEST to the
 * matching handler, then write the ACK/NAK back to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
		break;
	}

	/* Record the test type only if the handler ACKed the request. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4701
/*
 * Service an MST sink interrupt: read the ESI vector, retrain the link
 * if channel EQ was lost, and hand the event bits to the MST topology
 * manager, looping while the sink reports further pending events.
 * Returns the topology manager's result (0 if the IRQ went unhandled),
 * or -EINVAL when MST is off or the ESI read failed (in which case MST
 * mode is also torn down).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the handled events; retry the write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Re-read the ESI in case new events arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}
4758
/*
 * Check whether an already trained link has fallen out of spec and needs
 * retraining. Always false for an untrained link or while PSR owns the
 * main link, and also false when the cached link parameters are no
 * longer valid (a full modeset is needed then instead).
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
4792
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Retrain the link of an enabled (SST) DP output if its link status
 * indicates it is out of spec. Acquires the connection and CRTC locks
 * through @ctx; may return -EDEADLK for the caller to back off and
 * retry. Returns 0 when there was nothing to do or retraining was
 * kicked off successfully.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->base.active)
		return 0;

	/* Don't race with an in-flight commit on this connector. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}
4857
4858/*
4859 * If display is now connected check links status,
4860 * there has been known issues of link loss triggering
4861 * long pulse.
4862 *
4863 * Some sinks (eg. ASUS PB287Q) seem to perform some
4864 * weird HPD ping pong during modesets. So we can apparently
4865 * end up with HPD going low during a modeset, and then
4866 * going back up soon after. And once that happens we must
4867 * retrain the link to get a picture. That's in case no
4868 * userspace component reacted to intermittent HPD dip.
4869 */
/*
 * DP hotplug handler: run the generic hotplug processing, then attempt a
 * link retrain under a modeset acquire context, backing off and retrying
 * on -EDEADLK.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector,
		 bool irq_received)
{
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	state = intel_encoder_hotplug(encoder, connector, irq_received);

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry until the retrain either runs or fails for a real reason. */
	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	WARN(ret, "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}
4907
/*
 * Read and acknowledge DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD r1.1+), then
 * dispatch automated-test, content-protection and sink-specific IRQs.
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the pending IRQ bits by writing them back. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
}
4930
4931/*
4932 * According to DP spec
4933 * 5.1.2:
4934 *  1. Read DPCD
4935 *  2. Configure link according to Receiver Capabilities
4936 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4937 *  4. Check link status on receipt of hot-plug interrupt
4938 *
4939 * intel_dp_short_pulse -  handles short pulse interrupts
4940 * when full detection is not required.
4941 * Returns %true if short pulse is handled and full detection
4942 * is NOT required and %false otherwise.
4943 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Service any pending sink IRQs (automated tests, HDCP, ...). */
	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	/* PSR-specific short pulse handling. */
	intel_psr_short_pulse(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
4989
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Derive the connector status from the sink's DPCD: read the DPCD,
 * and for branch devices decide based on SINK_COUNT (HPD-aware sinks),
 * MST capability, or a DDC probe as a last resort.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP is handled by edp_detect(), never here. */
	if (WARN_ON(intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
5044
/* eDP panels are permanently attached; always report connected. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
5050
/* Live HPD state for IBX PCH ports, read from SDEISR. */
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
5073
/* Live HPD state for CPT PCH ports, read from SDEISR. */
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
5096
/*
 * Live HPD state for SPT: ports A and E have dedicated SDEISR bits;
 * the remaining ports use the CPT layout.
 */
static bool spt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = SDE_PORTA_HOTPLUG_SPT;
		break;
	case HPD_PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		return cpt_digital_port_connected(encoder);
	}

	return I915_READ(SDEISR) & bit;
}
5115
/* Live HPD state for G4x, read from PORT_HOTPLUG_STAT. */
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
5138
/* Live HPD state for GM45, which uses different status bits than G4x. */
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
5161
5162static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5163{
5164	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5165
5166	if (encoder->hpd_pin == HPD_PORT_A)
5167		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5168	else
5169		return ibx_digital_port_connected(encoder);
5170}
5171
5172static bool snb_digital_port_connected(struct intel_encoder *encoder)
5173{
5174	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5175
5176	if (encoder->hpd_pin == HPD_PORT_A)
5177		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5178	else
5179		return cpt_digital_port_connected(encoder);
5180}
5181
5182static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5183{
5184	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5185
5186	if (encoder->hpd_pin == HPD_PORT_A)
5187		return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5188	else
5189		return cpt_digital_port_connected(encoder);
5190}
5191
5192static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5193{
5194	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5195
5196	if (encoder->hpd_pin == HPD_PORT_A)
5197		return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5198	else
5199		return cpt_digital_port_connected(encoder);
5200}
5201
/* Live HPD state for BXT DDI A-C, read from GEN8_DE_PORT_ISR. */
static bool bxt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case HPD_PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case HPD_PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
5224
/* Live HPD state for an ICL combo PHY port, read from SDEISR. */
static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
				     struct intel_digital_port *intel_dig_port)
{
	enum port port = intel_dig_port->base.port;

	return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
5232
/*
 * Live HPD state on ICL+: dispatch on PHY type — combo PHYs are read
 * from SDEISR, Type-C PHYs go through the TC subsystem.
 */
static bool icl_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

	if (intel_phy_is_combo(dev_priv, phy))
		return icl_combo_port_connected(dev_priv, dig_port);
	else if (intel_phy_is_tc(dev_priv, phy))
		return intel_tc_port_connected(dig_port);
	else
		MISSING_CASE(encoder->hpd_pin);

	return false;
}
5248
5249/*
5250 * intel_digital_port_connected - is the specified port connected?
5251 * @encoder: intel_encoder
5252 *
5253 * In cases where there's a connector physically connected but it can't be used
5254 * by our hardware we also return false, since the rest of the driver should
5255 * pretty much treat the port as disconnected. This is relevant for type-C
5256 * (starting on ICL) where there's ownership involved.
5257 *
5258 * Return %true if port is connected, %false otherwise.
5259 */
/* Dispatch the live-state HPD check to the right per-platform helper. */
static bool __intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			return gm45_digital_port_connected(encoder);
		else
			return g4x_digital_port_connected(encoder);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		return icl_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
		return spt_digital_port_connected(encoder);
	else if (IS_GEN9_LP(dev_priv))
		return bxt_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 8))
		return bdw_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 7))
		return ivb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 6))
		return snb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 5))
		return ilk_digital_port_connected(encoder);

	MISSING_CASE(INTEL_GEN(dev_priv));
	return false;
}
5289
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* Hold the display-core power domain for the register reads. */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = __intel_digital_port_connected(encoder);

	return is_connected;
}
5301
5302static struct edid *
5303intel_dp_get_edid(struct intel_dp *intel_dp)
5304{
5305	struct intel_connector *intel_connector = intel_dp->attached_connector;
5306
5307	/* use cached edid if we have one */
5308	if (intel_connector->edid) {
5309		/* invalid edid */
5310		if (IS_ERR(intel_connector->edid))
5311			return NULL;
5312
5313		return drm_edid_duplicate(intel_connector->edid);
5314	} else
5315		return drm_get_edid(&intel_connector->base,
5316				    &intel_dp->aux.ddc);
5317}
5318
/*
 * (Re)read the sink's EDID, cache it on the connector, and update the
 * audio capability and the CEC adapter accordingly.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	/* Drop any stale EDID/audio/CEC state first. */
	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	intel_dp->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}
5332
/* Free the cached detect-time EDID and clear the derived state. */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}
5344
/*
 * Connector ->detect() hook: determine the connector status, refresh
 * sink caps (link rates, DSC), tear down MST on disappearance, read
 * the EDID and service pending sink IRQs. Must be called with
 * connection_mutex held (asserted below).
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget stale compliance and DSC caps from the old sink. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_dp->reset_link_params) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	return status;
}
5451
/*
 * Connector ->force() hook: refresh the cached EDID for a connector
 * that userspace forced connected, taking the AUX power domain for
 * the duration of the DDC read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
5476
/*
 * Connector ->get_modes() hook: populate modes from the cached EDID,
 * falling back to the fixed panel mode for EDID-less eDP.
 * Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (intel_dp_is_edp(intel_attached_dp(connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
5504
/*
 * Connector late-register hook: add the debugfs entries and register
 * the AUX channel and CEC adapter once the connector's kdev exists.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	i915_debugfs_connector_add(connector);

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux,
					      connector->name, dev->dev);
	return ret;
}
5528
/* Tear down in reverse order of intel_dp_connector_register(). */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
5538
/*
 * Cancel/flush all work associated with the encoder: MST state, the
 * delayed eDP VDD-off, the reboot notifier, and the AUX channel.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}
5564
/* Encoder ->destroy(): flush pending work, then free the port. */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(encoder));
}
5572
/* Make sure eDP panel VDD is off before the system suspends. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
5589
5590static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5591{
5592	long ret;
5593
5594#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5595	ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5596					       msecs_to_jiffies(timeout));
5597
5598	if (!ret)
5599		DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5600}
5601
/*
 * Write An to the sink over regular AUX, then trigger the hardware to
 * append the (software-inaccessible) Aksv: we build only the AUX header
 * and let AKSV_SELECT make the hardware supply the payload.
 */
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
				u8 *an)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
	static const struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_WRITE,
		.address = DP_AUX_HDCP_AKSV,
		.size = DRM_HDCP_KSV_LEN,
	};
	u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
	ssize_t dpcd_ret;
	int ret;

	/* Output An first, that's easy */
	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
				     an, DRM_HDCP_AN_LEN);
	if (dpcd_ret != DRM_HDCP_AN_LEN) {
		DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
			      dpcd_ret);
		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
	}

	/*
	 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
	 * order to get it on the wire, we need to create the AUX header as if
	 * we were writing the data, and then tickle the hardware to output the
	 * data once the header is sent out.
	 */
	intel_dp_aux_header(txbuf, &msg);

	ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
				rxbuf, sizeof(rxbuf),
				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
	if (ret < 0) {
		DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
		return ret;
	} else if (ret == 0) {
		DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
		return -EIO;
	}

	/* The reply code lives in the top nibble of the first rx byte. */
	reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
	if (reply != DP_AUX_NATIVE_REPLY_ACK) {
		DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
			      reply);
		return -EIO;
	}
	return 0;
}
5652
/* Read the receiver's Bksv from the HDCP DPCD registers. */
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
				   u8 *bksv)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
			       DRM_HDCP_KSV_LEN);
	if (ret != DRM_HDCP_KSV_LEN) {
		DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5665
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
				      u8 *bstatus)
{
	ssize_t ret;
	/*
	 * For some reason the HDMI and DP HDCP specs call this register
	 * definition by different names. In the HDMI spec, it's called BSTATUS,
	 * but in DP it's called BINFO.
	 */
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
			       bstatus, DRM_HDCP_BSTATUS_LEN);
	if (ret != DRM_HDCP_BSTATUS_LEN) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5683
/* Read the single-byte Bcaps register from the HDCP DPCD space. */
static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
			     u8 *bcaps)
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
			       bcaps, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}

	return 0;
}
5699
/* Report whether the attached HDCP receiver is a repeater (per Bcaps). */
static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
				   bool *repeater_present)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
	if (ret)
		return ret;

	*repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
	return 0;
}
5714
/* Read Ri' (the receiver's link-verification response) over AUX. */
static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
				u8 *ri_prime)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
			       ri_prime, DRM_HDCP_RI_LEN);
	if (ret != DRM_HDCP_RI_LEN) {
		DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5728
/* Check the Bstatus READY bit: is the repeater's KSV list available? */
static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
				 bool *ksv_ready)
{
	ssize_t ret;
	u8 bstatus;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	*ksv_ready = bstatus & DP_BSTATUS_READY;
	return 0;
}
5744
/* Drain the repeater's KSV FIFO, num_downstream entries of 5 bytes each. */
static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
				int num_downstream, u8 *ksv_fifo)
{
	ssize_t ret;
	int i;

	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
	for (i = 0; i < num_downstream; i += 3) {
		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
				       DP_AUX_HDCP_KSV_FIFO,
				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
				       len);
		if (ret != len) {
			DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
				      i, ret);
			return ret >= 0 ? -EIO : ret;
		}
	}
	return 0;
}
5767
/* Read the i'th 4-byte part of V' (the repeater's SHA-1 digest). */
static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
				    int i, u32 *part)
{
	ssize_t ret;

	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
		return -EINVAL;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_AUX_HDCP_V_PRIME(i), part,
			       DRM_HDCP_V_PRIME_PART_LEN);
	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
		DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}
5786
/* HDCP signalling toggle: a no-op on single-stream DP. */
static
int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
				    bool enable)
{
	/* Not used for single stream DisplayPort setups */
	return 0;
}
5794
/*
 * Check HDCP link health via Bstatus; a read failure, a link failure
 * or a reauthentication request all count as a broken link (false).
 */
static
bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
	ssize_t ret;
	u8 bstatus;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return false;
	}

	return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
}
5810
/* Report whether the sink advertises HDCP capability in Bcaps. */
static
int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
			  bool *hdcp_capable)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
	if (ret)
		return ret;

	*hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
	return 0;
}
5825
/* Wire format of the HDCP 2.2 errata stream-type message for DP. */
struct hdcp2_dp_errata_stream_type {
	u8	msg_id;
	u8	stream_type;
} __packed;
5830
/* Per-message DPCD location and timing info for HDCP 2.2 over DP. */
struct hdcp2_dp_msg_data {
	u8 msg_id;            /* HDCP_2_2_* message identifier */
	u32 offset;           /* DPCD offset of the message body */
	bool msg_detectable;  /* availability visible in RxStatus? */
	u32 timeout;
	u32 timeout2; /* Added for non_paired situation */
};
5838
/* Lookup table consumed by get_hdcp2_dp_msg_data(). */
static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
	{ HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
	{ HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
	  false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
	{ HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
	  false, 0, 0 },
	{ HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
	  false, 0, 0 },
	{ HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
	  true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
	  HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
	{ HDCP_2_2_AKE_SEND_PAIRING_INFO,
	  DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
	  HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
	{ HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
	{ HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
	  false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
	{ HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_SEND_RECVID_LIST,
	  DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
	  HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
	{ HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_STREAM_MANAGE,
	  DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
	  false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE	50
	{ HDCP_2_2_ERRATA_DP_STREAM_TYPE,
	  DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
	  0, 0 },
};
5874
5875static inline
5876int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5877				  u8 *rx_status)
5878{
5879	ssize_t ret;
5880
5881	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5882			       DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5883			       HDCP_2_2_DP_RXSTATUS_LEN);
5884	if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5885		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5886		return ret >= 0 ? -EIO : ret;
5887	}
5888
5889	return 0;
5890}
5891
/*
 * Translate the RxStatus bits into "is this particular HDCP 2.2
 * message ready to be read?". Only H', pairing info, and the receiver
 * ID list have availability bits; anything else is -EINVAL.
 */
static
int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
				  u8 msg_id, bool *msg_ready)
{
	u8 rx_status;
	int ret;

	*msg_ready = false;
	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
	if (ret < 0)
		return ret;

	switch (msg_id) {
	case HDCP_2_2_AKE_SEND_HPRIME:
		if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
			*msg_ready = true;
		break;
	case HDCP_2_2_AKE_SEND_PAIRING_INFO:
		if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
			*msg_ready = true;
		break;
	case HDCP_2_2_REP_SEND_RECVID_LIST:
		if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
			*msg_ready = true;
		break;
	default:
		DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
		return -EINVAL;
	}

	return 0;
}
5924
/*
 * Wait until an HDCP 2.2 message should be readable: either sleep the
 * full per-message timeout (undetectable messages) or wait for a
 * CP_IRQ and then verify availability via RxStatus.
 */
static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
			    const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
	struct intel_dp *dp = &intel_dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	u8 msg_id = hdcp2_msg_data->msg_id;
	int ret, timeout;
	bool msg_ready = false;

	/* H' has a longer timeout when the receiver isn't paired yet. */
	if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
		timeout = hdcp2_msg_data->timeout2;
	else
		timeout = hdcp2_msg_data->timeout;

	/*
	 * There is no way to detect the CERT, LPRIME and STREAM_READY
	 * availability. So Wait for timeout and read the msg.
	 */
	if (!hdcp2_msg_data->msg_detectable) {
		mdelay(timeout);
		ret = 0;
	} else {
		/*
		 * As we want to check the msg availability at timeout, Ignoring
		 * the timeout at wait for CP_IRQ.
		 */
		intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
		ret = hdcp2_detect_msg_availability(intel_dig_port,
						    msg_id, &msg_ready);
		if (!msg_ready)
			ret = -ETIMEDOUT;
	}

	if (ret)
		DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
			      hdcp2_msg_data->msg_id, ret, timeout);

	return ret;
}
5965
5966static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
5967{
5968	int i;
 
5969
5970	for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
5971		if (hdcp2_dp_msg_data[i].msg_id == msg_id)
5972			return &hdcp2_dp_msg_data[i];
5973
5974	return NULL;
5975}
5976
/*
 * Write an HDCP 2.2 message to its DPCD offset. The leading msg_id
 * byte of @buf is dropped (DP carries it implicitly via the offset),
 * and the body is chunked into DP_AUX_MAX_PAYLOAD_BYTES transfers.
 */
static
int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
			     void *buf, size_t size)
{
	struct intel_dp *dp = &intel_dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_write, len;
	const struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
	if (!hdcp2_msg_data)
		return -EINVAL;

	offset = hdcp2_msg_data->offset;

	/* No msg_id in DP HDCP2.2 msgs */
	bytes_to_write = size - 1;
	byte++;

	/* Snapshot the CP_IRQ count so a later wait sees new IRQs only. */
	hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);

	while (bytes_to_write) {
		len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
				DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;

		ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
					offset, (void *)byte, len);
		if (ret < 0)
			return ret;

		bytes_to_write -= ret;
		byte += ret;
		offset += ret;
	}

	return size;
}
6016
6017static
6018ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6019{
6020	u8 rx_info[HDCP_2_2_RXINFO_LEN];
6021	u32 dev_cnt;
6022	ssize_t ret;
6023
6024	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6025			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
6026			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6027	if (ret != HDCP_2_2_RXINFO_LEN)
6028		return ret >= 0 ? -EIO : ret;
6029
6030	dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6031		   HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6032
6033	if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6034		dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6035
6036	ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6037		HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6038		(dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6039
6040	return ret;
6041}
6042
/*
 * Read an HDCP2.2 message from the sink over the DP AUX channel.
 *
 * Waits until the message for @msg_id is (expected to be) available,
 * then reads it in AUX-sized chunks into @buf. Since DP adaptation
 * messages carry no msg_id byte on the wire, the first byte of @buf
 * is filled in with @msg_id afterwards so callers see the same layout
 * as on HDMI. Returns the message size or a negative errno.
 */
static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
			    u8 msg_id, void *buf, size_t size)
{
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_recv, len;
	const struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
	if (!hdcp2_msg_data)
		return -EINVAL;
	offset = hdcp2_msg_data->offset;

	ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
	if (ret < 0)
		return ret;

	/*
	 * The receiver-ID list is variable length; replace the caller's
	 * worst-case size with the actual size reported via RxInfo.
	 */
	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
		ret = get_receiver_id_list_size(intel_dig_port);
		if (ret < 0)
			return ret;

		size = ret;
	}
	bytes_to_recv = size - 1;

	/* DP adaptation msgs has no msg_id */
	byte++;

	while (bytes_to_recv) {
		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;

		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
				       (void *)byte, len);
		if (ret < 0) {
			DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
			return ret;
		}

		bytes_to_recv -= ret;
		byte += ret;
		offset += ret;
	}
	/* Reconstruct the msg_id byte the wire format omits */
	byte = buf;
	*byte = msg_id;

	return size;
}
6093
6094static
6095int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6096				      bool is_repeater, u8 content_type)
6097{
6098	struct hdcp2_dp_errata_stream_type stream_type_msg;
6099
6100	if (is_repeater)
6101		return 0;
6102
6103	/*
6104	 * Errata for DP: As Stream type is used for encryption, Receiver
6105	 * should be communicated with stream type for the decryption of the
6106	 * content.
6107	 * Repeater will be communicated with stream type as a part of it's
6108	 * auth later in time.
6109	 */
6110	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6111	stream_type_msg.stream_type = content_type;
6112
6113	return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6114					sizeof(stream_type_msg));
6115}
6116
6117static
6118int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6119{
6120	u8 rx_status;
 
6121	int ret;
6122
6123	ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6124	if (ret)
6125		return ret;
6126
6127	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6128		ret = HDCP_REAUTH_REQUEST;
6129	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6130		ret = HDCP_LINK_INTEGRITY_FAILURE;
6131	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6132		ret = HDCP_TOPOLOGY_CHANGE;
6133
6134	return ret;
6135}
6136
6137static
6138int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6139			   bool *capable)
6140{
6141	u8 rx_caps[3];
6142	int ret;
6143
6144	*capable = false;
6145	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6146			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6147			       rx_caps, HDCP_2_2_RXCAPS_LEN);
6148	if (ret != HDCP_2_2_RXCAPS_LEN)
6149		return ret >= 0 ? -EIO : ret;
6150
6151	if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6152	    HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6153		*capable = true;
6154
6155	return 0;
6156}
6157
/*
 * HDCP shim for native DP: wires the generic i915 HDCP 1.4 and 2.2 state
 * machines to the DPCD-based accessors implemented above.
 */
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
	.write_2_2_msg = intel_dp_hdcp2_write_msg,
	.read_2_2_msg = intel_dp_hdcp2_read_msg,
	.config_stream_type = intel_dp_hdcp2_config_stream_type,
	.check_2_2_link = intel_dp_hdcp2_check_link,
	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
	.protocol = HDCP_PROTOCOL_DP,
};
6177
/*
 * Bring software VDD tracking in line with hardware state on boot/resume.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to sanitize if VDD is already off */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
6199
6200static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6201{
6202	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6203	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6204	enum pipe pipe;
6205
6206	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6207				  encoder->port, &pipe))
6208		return pipe;
6209
6210	return INVALID_PIPE;
6211}
6212
/*
 * drm_encoder_funcs.reset hook: re-sync software state with whatever the
 * hardware/BIOS left behind (DP register cache, LSPCON, eDP PPS).
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	/* On DDI platforms the DP register cache is managed elsewhere */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	/* Force link parameter re-detection on the next modeset */
	intel_dp->reset_link_params = true;

	/* Only VLV/CHV and eDP need the PPS work below */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
6246
/* Core drm_connector ops for DP connectors (atomic state + lifecycle). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
6258
/* Probe/modeset helper ops for DP connectors (detect, modes, validation). */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};
6265
/* Encoder ops for DP: state reset on resume plus teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
6270
/*
 * Handle an HPD pulse on a DP port.
 *
 * Short pulses are serviced here (sink IRQ / MST handling); long pulses
 * only mark the link for re-detection and return IRQ_NONE so the caller
 * performs a full detect. Long pulses on eDP are ignored entirely to
 * avoid a VDD-off feedback loop (see comment below).
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	/* Long pulse: let the caller run full detection */
	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);

			/* IRQ_NONE triggers a full re-detect in the caller */
			return IRQ_NONE;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		if (!handled)
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
6324
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	/* Port A is hardwired to eDP before gen9 */
	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	/* Otherwise trust the VBT's port mapping */
	return intel_bios_is_port_edp(dev_priv, port);
}
6340
/*
 * Attach the DP-relevant drm_connector properties: force-audio,
 * broadcast RGB, max bpc, and (for eDP) the panel scaling modes.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	/* No force-audio on g4x or on (eDP) port A */
	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	/* GMCH platforms top out at 10 bpc, gen5+ at 12 bpc */
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		/* Center scaling needs a panel fitter mode GMCH lacks */
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}
}
6369
6370static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6371{
6372	intel_dp->panel_power_off_time = ktime_get_boottime();
6373	intel_dp->last_power_on = jiffies;
6374	intel_dp->last_backlight_off = jiffies;
6375}
6376
/*
 * Read the panel power sequencer delays back from the hardware
 * registers into @seq (values in the hw's 100usec units, t11_t12
 * converted from its 100msec register granularity).
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ironlake_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_ctl);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	/* t11_t12 lives in pp_div where that register exists, else pp_ctrl */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = I915_READ(regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
6411
/* Debug-log one set of PPS delays, tagged with @state_name (e.g. "hw"/"sw"). */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
6419
6420static void
6421intel_pps_verify_state(struct intel_dp *intel_dp)
6422{
6423	struct edp_power_seq hw;
6424	struct edp_power_seq *sw = &intel_dp->pps_delays;
6425
6426	intel_pps_readout_hw_state(intel_dp, &hw);
6427
6428	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6429	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6430		DRM_ERROR("PPS state mismatch\n");
6431		intel_pps_dump_state("sw", sw);
6432		intel_pps_dump_state("hw", &hw);
6433	}
6434}
6435
/*
 * Compute the final eDP panel power sequencer delays from the max of the
 * current hw values and the VBT, falling back to eDP spec limits when
 * both are unset. Runs once; caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
			      vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert 100usec units to msec for the cached per-dp delays */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
6527
/*
 * Program the computed PPS delays (intel_dp->pps_delays) into the panel
 * power sequencer registers, including the port select bits and the pp
 * clock divisor. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		I915_WRITE(regs.pp_div,
			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		/* No pp_div register: the cycle delay lives in pp_ctrl instead */
		u32 pp_ctl;

		pp_ctl = I915_READ(regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		I915_WRITE(regs.pp_ctrl, pp_ctl);
	}

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      i915_mmio_reg_valid(regs.pp_div) ?
		      I915_READ(regs.pp_div) :
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
6622
6623static void intel_dp_pps_init(struct intel_dp *intel_dp)
6624{
6625	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6626
6627	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6628		vlv_initial_power_sequencer_setup(intel_dp);
6629	} else {
6630		intel_dp_init_panel_power_sequencer(intel_dp);
6631		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6632	}
6633}
6634
6635/**
6636 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6637 * @dev_priv: i915 device
6638 * @crtc_state: a pointer to the active intel_crtc_state
6639 * @refresh_rate: RR to be programmed
6640 *
6641 * This function gets called when refresh rate (RR) has to be changed from
6642 * one frequency to another. Switches can be between high and low RR
6643 * supported by the panel or to any other RR based on media playback (in
6644 * this case, RR value needs to be passed from user space).
6645 *
6646 * The caller of this function needs to take a lock on dev_priv->drrs.
6647 */
6648static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6649				    const struct intel_crtc_state *crtc_state,
6650				    int refresh_rate)
6651{
6652	struct intel_dp *intel_dp = dev_priv->drrs.dp;
6653	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6654	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6655
6656	if (refresh_rate <= 0) {
6657		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
 
6658		return;
6659	}
6660
6661	if (intel_dp == NULL) {
6662		DRM_DEBUG_KMS("DRRS not supported.\n");
6663		return;
6664	}
6665
6666	if (!intel_crtc) {
6667		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
 
6668		return;
6669	}
6670
6671	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6672		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6673		return;
6674	}
6675
6676	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6677			refresh_rate)
6678		index = DRRS_LOW_RR;
6679
6680	if (index == dev_priv->drrs.refresh_rate_type) {
6681		DRM_DEBUG_KMS(
6682			"DRRS requested for previously set RR...ignoring\n");
6683		return;
6684	}
6685
6686	if (!crtc_state->base.active) {
6687		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
 
6688		return;
6689	}
6690
6691	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6692		switch (index) {
6693		case DRRS_HIGH_RR:
6694			intel_dp_set_m_n(crtc_state, M1_N1);
6695			break;
6696		case DRRS_LOW_RR:
6697			intel_dp_set_m_n(crtc_state, M2_N2);
6698			break;
6699		case DRRS_MAX_RR:
6700		default:
6701			DRM_ERROR("Unsupported refreshrate type\n");
 
6702		}
6703	} else if (INTEL_GEN(dev_priv) > 6) {
6704		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6705		u32 val;
6706
6707		val = I915_READ(reg);
6708		if (index > DRRS_HIGH_RR) {
6709			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6710				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6711			else
6712				val |= PIPECONF_EDP_RR_MODE_SWITCH;
6713		} else {
6714			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6715				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6716			else
6717				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6718		}
6719		I915_WRITE(reg, val);
6720	}
6721
6722	dev_priv->drrs.refresh_rate_type = index;
6723
6724	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
 
 
 
 
 
 
 
 
 
 
6725}
6726
6727/**
6728 * intel_edp_drrs_enable - init drrs struct if supported
6729 * @intel_dp: DP struct
6730 * @crtc_state: A pointer to the active crtc state.
6731 *
6732 * Initializes frontbuffer_bits and drrs.dp
6733 */
6734void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6735			   const struct intel_crtc_state *crtc_state)
6736{
6737	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6738
6739	if (!crtc_state->has_drrs) {
6740		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
6741		return;
6742	}
6743
6744	if (dev_priv->psr.enabled) {
6745		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
6746		return;
6747	}
6748
6749	mutex_lock(&dev_priv->drrs.mutex);
 
6750	if (dev_priv->drrs.dp) {
6751		DRM_DEBUG_KMS("DRRS already enabled\n");
6752		goto unlock;
6753	}
6754
6755	dev_priv->drrs.busy_frontbuffer_bits = 0;
6756
6757	dev_priv->drrs.dp = intel_dp;
6758
6759unlock:
6760	mutex_unlock(&dev_priv->drrs.mutex);
6761}
6762
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6763/**
6764 * intel_edp_drrs_disable - Disable DRRS
6765 * @intel_dp: DP struct
6766 * @old_crtc_state: Pointer to old crtc_state.
6767 *
6768 */
6769void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6770			    const struct intel_crtc_state *old_crtc_state)
6771{
6772	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6773
6774	if (!old_crtc_state->has_drrs)
6775		return;
6776
6777	mutex_lock(&dev_priv->drrs.mutex);
6778	if (!dev_priv->drrs.dp) {
6779		mutex_unlock(&dev_priv->drrs.mutex);
6780		return;
6781	}
6782
6783	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6784		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
6785			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
6786
6787	dev_priv->drrs.dp = NULL;
6788	mutex_unlock(&dev_priv->drrs.mutex);
6789
6790	cancel_delayed_work_sync(&dev_priv->drrs.work);
6791}
6792
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Delayed work that drops to the panel's downclocked refresh rate once
 * the screen has been idle long enough (scheduled from the flush hook).
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was queued */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
6824
6825/**
6826 * intel_edp_drrs_invalidate - Disable Idleness DRRS
6827 * @dev_priv: i915 device
6828 * @frontbuffer_bits: frontbuffer plane tracking bits
6829 *
6830 * This function gets called everytime rendering on the given planes start.
6831 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
6832 *
6833 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6834 */
6835void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6836			       unsigned int frontbuffer_bits)
6837{
 
6838	struct drm_crtc *crtc;
6839	enum pipe pipe;
6840
6841	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6842		return;
6843
6844	cancel_delayed_work(&dev_priv->drrs.work);
6845
6846	mutex_lock(&dev_priv->drrs.mutex);
6847	if (!dev_priv->drrs.dp) {
 
 
6848		mutex_unlock(&dev_priv->drrs.mutex);
6849		return;
6850	}
6851
6852	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6853	pipe = to_intel_crtc(crtc)->pipe;
6854
6855	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6856	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6857
6858	/* invalidate means busy screen hence upclock */
6859	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6860		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6861			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6862
6863	mutex_unlock(&dev_priv->drrs.mutex);
6864}
6865
6866/**
6867 * intel_edp_drrs_flush - Restart Idleness DRRS
6868 * @dev_priv: i915 device
6869 * @frontbuffer_bits: frontbuffer plane tracking bits
6870 *
6871 * This function gets called every time rendering on the given planes has
6872 * completed or flip on a crtc is completed. So DRRS should be upclocked
6873 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
6874 * if no other planes are dirty.
6875 *
6876 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6877 */
6878void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6879			  unsigned int frontbuffer_bits)
6880{
 
6881	struct drm_crtc *crtc;
6882	enum pipe pipe;
6883
6884	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6885		return;
6886
6887	cancel_delayed_work(&dev_priv->drrs.work);
6888
6889	mutex_lock(&dev_priv->drrs.mutex);
6890	if (!dev_priv->drrs.dp) {
 
 
6891		mutex_unlock(&dev_priv->drrs.mutex);
6892		return;
6893	}
6894
6895	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
6896	pipe = to_intel_crtc(crtc)->pipe;
6897
6898	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6899	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6900
6901	/* flush means busy screen hence upclock */
6902	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6903		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6904				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6905
6906	/*
6907	 * flush also means no more activity hence schedule downclock, if all
6908	 * other fbs are quiescent too
6909	 */
6910	if (!dev_priv->drrs.busy_frontbuffer_bits)
6911		schedule_delayed_work(&dev_priv->drrs.work,
6912				msecs_to_jiffies(1000));
6913	mutex_unlock(&dev_priv->drrs.mutex);
6914}
6915
6916/**
6917 * DOC: Display Refresh Rate Switching (DRRS)
6918 *
6919 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
6921 * dynamically, based on the usage scenario. This feature is applicable
6922 * for internal panels.
6923 *
6924 * Indication that the panel supports DRRS is given by the panel EDID, which
6925 * would list multiple refresh rates for one resolution.
6926 *
6927 * DRRS is of 2 types - static and seamless.
6928 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6929 * (may appear as a blink on screen) and is used in dock-undock scenario.
6930 * Seamless DRRS involves changing RR without any visual effect to the user
6931 * and can be used during normal system usage. This is done by programming
6932 * certain registers.
6933 *
6934 * Support for static/seamless DRRS may be indicated in the VBT based on
6935 * inputs from the panel spec.
6936 *
6937 * DRRS saves power by switching to low RR based on usage scenarios.
6938 *
6939 * The implementation is based on frontbuffer tracking implementation.  When
6940 * there is a disturbance on the screen triggered by user activity or a periodic
6941 * system activity, DRRS is disabled (RR is changed to high RR).  When there is
6942 * no movement on screen, after a timeout of 1 second, a switch to low RR is
6943 * made.
6944 *
6945 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6946 * and intel_edp_drrs_flush() are called.
6947 *
6948 * DRRS can be further extended to support other internal panels and also
6949 * the scenario of video playback wherein RR is set based on the rate
6950 * requested by userspace.
6951 */
6952
6953/**
6954 * intel_dp_drrs_init - Init basic DRRS work and mutex.
6955 * @connector: eDP connector
6956 * @fixed_mode: preferred mode of panel
6957 *
6958 * This function is  called only once at driver load to initialize basic
6959 * DRRS stuff.
6960 *
6961 * Returns:
6962 * Downclock mode if panel supports it, else return NULL.
6963 * DRRS support is determined by the presence of downclock mode (apart
6964 * from VBT setting).
6965 */
6966static struct drm_display_mode *
6967intel_dp_drrs_init(struct intel_connector *connector,
6968		   struct drm_display_mode *fixed_mode)
6969{
6970	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
6971	struct drm_display_mode *downclock_mode = NULL;
6972
6973	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6974	mutex_init(&dev_priv->drrs.mutex);
6975
6976	if (INTEL_GEN(dev_priv) <= 6) {
6977		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
 
6978		return NULL;
6979	}
6980
6981	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6982		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6983		return NULL;
6984	}
6985
6986	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
6987	if (!downclock_mode) {
6988		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
 
6989		return NULL;
6990	}
6991
6992	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6993
6994	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6995	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
 
6996	return downclock_mode;
6997}
6998
/*
 * Perform the eDP-specific part of connector initialization: set up the
 * panel power sequencer, cache the DPCD and EDID, determine the fixed
 * panel mode (EDID first, VBT as fallback) and the optional DRRS
 * downclock mode, and initialize panel/backlight state.
 *
 * Returns true on success (and also when the connector is not eDP at
 * all), false when eDP must not be registered (LVDS conflict, or the
 * panel's link info could not be read).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	/* Nothing to do for external DP; only eDP needs panel setup. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	/* Bring up the power sequencer before touching DPCD/EDID. */
	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector,
								edid);
		} else {
			/* EDID present but yielded no modes: record as -EINVAL. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		/* No EDID at all: record as -ENOENT. */
		edid = ERR_PTR(-ENOENT);
	}
	/* May be a valid EDID or an ERR_PTR encoding the failure. */
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	/* DRRS needs a fixed mode to pair the downclock mode against. */
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode)
		drm_connector_init_panel_orientation_property(
			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
7111
7112static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7113{
7114	struct intel_connector *intel_connector;
7115	struct drm_connector *connector;
7116
7117	intel_connector = container_of(work, typeof(*intel_connector),
7118				       modeset_retry_work);
7119	connector = &intel_connector->base;
7120	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7121		      connector->name);
7122
7123	/* Grab the locks before changing connector property*/
7124	mutex_lock(&connector->dev->mode_config.mutex);
7125	/* Set connector link status to BAD and send a Uevent to notify
7126	 * userspace to do a modeset.
7127	 */
7128	drm_connector_set_link_status_property(connector,
7129					       DRM_MODE_LINK_STATUS_BAD);
7130	mutex_unlock(&connector->dev->mode_config.mutex);
7131	/* Send Hotplug uevent so userspace can reprobe */
7132	drm_kms_helper_hotplug_event(connector->dev);
7133}
7134
/*
 * Finish initializing a DP encoder/connector pair: validate the port
 * configuration, register the DRM connector, set up AUX, optionally
 * initialize MST, eDP panel state and HDCP, and apply the G4X band-gap
 * workaround.
 *
 * Returns true on success; false if the configuration is invalid or the
 * eDP panel setup failed (the connector is cleaned up on that path).
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		WARN_ON(intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	/* Interlace is only allowed on non-GMCH platforms. */
	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* YCbCr 4:2:0 output is supported from Gen11 onwards. */
	if (INTEL_GEN(dev_priv) >= 11)
		connector->ycbcr_420_allowed = true;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP panel setup; on failure undo the AUX and MST init above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP failure is non-fatal: the connector works without it. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
7257
7258bool intel_dp_init(struct drm_i915_private *dev_priv,
7259		   i915_reg_t output_reg,
7260		   enum port port)
7261{
7262	struct intel_digital_port *intel_dig_port;
7263	struct intel_encoder *intel_encoder;
7264	struct drm_encoder *encoder;
7265	struct intel_connector *intel_connector;
7266
7267	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
7268	if (!intel_dig_port)
7269		return false;
7270
7271	intel_connector = intel_connector_alloc();
7272	if (!intel_connector)
7273		goto err_connector_alloc;
7274
7275	intel_encoder = &intel_dig_port->base;
7276	encoder = &intel_encoder->base;
7277
7278	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7279			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7280			     "DP %c", port_name(port)))
7281		goto err_encoder_init;
7282
7283	intel_encoder->hotplug = intel_dp_hotplug;
7284	intel_encoder->compute_config = intel_dp_compute_config;
7285	intel_encoder->get_hw_state = intel_dp_get_hw_state;
7286	intel_encoder->get_config = intel_dp_get_config;
7287	intel_encoder->update_pipe = intel_panel_update_backlight;
7288	intel_encoder->suspend = intel_dp_encoder_suspend;
7289	if (IS_CHERRYVIEW(dev_priv)) {
7290		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7291		intel_encoder->pre_enable = chv_pre_enable_dp;
7292		intel_encoder->enable = vlv_enable_dp;
7293		intel_encoder->disable = vlv_disable_dp;
7294		intel_encoder->post_disable = chv_post_disable_dp;
7295		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7296	} else if (IS_VALLEYVIEW(dev_priv)) {
7297		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7298		intel_encoder->pre_enable = vlv_pre_enable_dp;
7299		intel_encoder->enable = vlv_enable_dp;
7300		intel_encoder->disable = vlv_disable_dp;
7301		intel_encoder->post_disable = vlv_post_disable_dp;
7302	} else {
7303		intel_encoder->pre_enable = g4x_pre_enable_dp;
7304		intel_encoder->enable = g4x_enable_dp;
7305		intel_encoder->disable = g4x_disable_dp;
7306		intel_encoder->post_disable = g4x_post_disable_dp;
7307	}
7308
7309	intel_dig_port->dp.output_reg = output_reg;
7310	intel_dig_port->max_lanes = 4;
7311
7312	intel_encoder->type = INTEL_OUTPUT_DP;
7313	intel_encoder->power_domain = intel_port_to_power_domain(port);
7314	if (IS_CHERRYVIEW(dev_priv)) {
7315		if (port == PORT_D)
7316			intel_encoder->crtc_mask = 1 << 2;
7317		else
7318			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
7319	} else {
7320		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
7321	}
7322	intel_encoder->cloneable = 0;
7323	intel_encoder->port = port;
7324
7325	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7326
7327	if (port != PORT_A)
7328		intel_infoframe_init(intel_dig_port);
7329
7330	intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7331	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7332		goto err_init_connector;
7333
7334	return true;
7335
7336err_init_connector:
7337	drm_encoder_cleanup(encoder);
7338err_encoder_init:
7339	kfree(intel_connector);
7340err_connector_alloc:
7341	kfree(intel_dig_port);
7342	return false;
7343}
7344
7345void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7346{
7347	struct intel_encoder *encoder;
7348
 
 
 
7349	for_each_intel_encoder(&dev_priv->drm, encoder) {
7350		struct intel_dp *intel_dp;
7351
7352		if (encoder->type != INTEL_OUTPUT_DDI)
7353			continue;
7354
7355		intel_dp = enc_to_intel_dp(&encoder->base);
7356
7357		if (!intel_dp->can_mst)
7358			continue;
7359
7360		if (intel_dp->is_mst)
7361			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7362	}
7363}
7364
7365void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7366{
7367	struct intel_encoder *encoder;
7368
 
 
 
7369	for_each_intel_encoder(&dev_priv->drm, encoder) {
7370		struct intel_dp *intel_dp;
7371		int ret;
7372
7373		if (encoder->type != INTEL_OUTPUT_DDI)
7374			continue;
7375
7376		intel_dp = enc_to_intel_dp(&encoder->base);
7377
7378		if (!intel_dp->can_mst)
7379			continue;
7380
7381		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
 
7382		if (ret) {
7383			intel_dp->is_mst = false;
7384			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7385							false);
7386		}
7387	}
7388}