Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v5.14.15
  1/*
  2 * Copyright © 2008-2015 Intel Corporation
  3 *
  4 * Permission is hereby granted, free of charge, to any person obtaining a
  5 * copy of this software and associated documentation files (the "Software"),
  6 * to deal in the Software without restriction, including without limitation
  7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8 * and/or sell copies of the Software, and to permit persons to whom the
  9 * Software is furnished to do so, subject to the following conditions:
 10 *
 11 * The above copyright notice and this permission notice (including the next
 12 * paragraph) shall be included in all copies or substantial portions of the
 13 * Software.
 14 *
 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 21 * IN THE SOFTWARE.
 22 */
 23
 
 24#include "intel_display_types.h"
 25#include "intel_dp.h"
 26#include "intel_dp_link_training.h"
 27
 28static void
 29intel_dp_dump_link_status(struct drm_device *drm,
 30			  const u8 link_status[DP_LINK_STATUS_SIZE])
 31{
 32	drm_dbg_kms(drm,
 33		    "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
 34		    link_status[0], link_status[1], link_status[2],
 35		    link_status[3], link_status[4], link_status[5]);
 36}
 
 
 
 
 
 
 
 
 
 
 
 37
 38static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
 39{
 40	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
 41}
 42
 43static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
 44{
 45	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
 46				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
 47}
 48
 49static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
 50				     char *buf, size_t buf_size)
 51{
 52	if (dp_phy == DP_PHY_DPRX)
 53		snprintf(buf, buf_size, "DPRX");
 54	else
 55		snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
 56
 57	return buf;
 58}
 59
 60static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
 61				   enum drm_dp_phy dp_phy)
 62{
 63	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
 64}
 65
 66static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
 
 67					 enum drm_dp_phy dp_phy)
 68{
 69	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
 70	char phy_name[10];
 71
 72	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
 73
 74	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
 75		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
 76			    "failed to read the PHY caps for %s\n",
 77			    phy_name);
 78		return;
 79	}
 80
 81	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
 82		    "%s PHY capabilities: %*ph\n",
 83		    phy_name,
 84		    (int)sizeof(intel_dp->lttpr_phy_caps[0]),
 85		    phy_caps);
 86}
 87
 88static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
 
 89{
 90	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 91
 92	if (intel_dp_is_edp(intel_dp))
 93		return false;
 94
 95	/*
 96	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
 97	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
 98	 */
 99	if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
100		return false;
101
102	if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
103					  intel_dp->lttpr_common_caps) < 0)
 
104		goto reset_caps;
105
106	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
107		    "LTTPR common capabilities: %*ph\n",
108		    (int)sizeof(intel_dp->lttpr_common_caps),
109		    intel_dp->lttpr_common_caps);
110
111	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
112	if (intel_dp->lttpr_common_caps[0] < 0x14)
113		goto reset_caps;
114
115	return true;
116
117reset_caps:
118	intel_dp_reset_lttpr_common_caps(intel_dp);
119	return false;
120}
121
122static bool
123intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
124{
125	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
126			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
127
128	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
129}
130
131static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
132{
133	int lttpr_count;
134	int i;
135
136	if (!intel_dp_read_lttpr_common_caps(intel_dp))
137		return 0;
138
139	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
140	/*
141	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
142	 * detected as this breaks link training at least on the Dell WD19TB
143	 * dock.
144	 */
145	if (lttpr_count == 0)
146		return 0;
147
148	/*
149	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
150	 * non-transparent mode and the disable->enable non-transparent mode
151	 * sequence.
152	 */
153	intel_dp_set_lttpr_transparent_mode(intel_dp, true);
154
155	/*
156	 * In case of unsupported number of LTTPRs or failing to switch to
157	 * non-transparent mode fall-back to transparent link training mode,
158	 * still taking into account any LTTPR common lane- rate/count limits.
159	 */
160	if (lttpr_count < 0)
161		return 0;
162
163	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
164		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
165			    "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
166
167		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
168		intel_dp_reset_lttpr_count(intel_dp);
169
170		return 0;
171	}
172
173	for (i = 0; i < lttpr_count; i++)
174		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));
175
176	return lttpr_count;
177}
178
179/**
180 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
181 * @intel_dp: Intel DP struct
182 *
183 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
184 * link training mode if any is detected and read the PHY capabilities for all
185 * detected LTTPRs. In case of an LTTPR detection error or if the number of
186 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
187 * transparent mode link training mode.
188 *
189 * Returns:
190 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
191 *       DPRX capabilities are read out.
192 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
193 *       detection failure and the transparent LT mode was set. The DPRX
194 *       capabilities are read out.
195 *   <0  Reading out the DPRX capabilities failed.
196 */
197int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
198{
199	int lttpr_count = intel_dp_init_lttpr(intel_dp);
 
 
 
 
 
 
 
 
 
 
 
 
200
201	/* The DPTX shall read the DPRX caps after LTTPR detection. */
 
 
 
 
 
 
 
 
 
202	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
203		intel_dp_reset_lttpr_common_caps(intel_dp);
204		return -EIO;
205	}
206
207	return lttpr_count;
208}
209
210static u8 dp_voltage_max(u8 preemph)
211{
212	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
213	case DP_TRAIN_PRE_EMPH_LEVEL_0:
214		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
215	case DP_TRAIN_PRE_EMPH_LEVEL_1:
216		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
217	case DP_TRAIN_PRE_EMPH_LEVEL_2:
218		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
219	case DP_TRAIN_PRE_EMPH_LEVEL_3:
220	default:
221		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
222	}
223}
224
225static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
226				     enum drm_dp_phy dp_phy)
227{
228	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
229
230	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
231		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
232	else
233		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
234}
235
236static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
237				     enum drm_dp_phy dp_phy)
238{
239	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
240
241	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
242		return DP_TRAIN_PRE_EMPH_LEVEL_3;
243	else
244		return DP_TRAIN_PRE_EMPH_LEVEL_2;
245}
246
247static bool
248intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
249				     enum drm_dp_phy dp_phy)
250{
251	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
252	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
253
254	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
255
256	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
257}
258
259static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
260				   const struct intel_crtc_state *crtc_state,
261				   enum drm_dp_phy dp_phy)
262{
263	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
264	u8 voltage_max;
265
266	/*
267	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
268	 * the DPRX_PHY we train.
269	 */
270	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
271		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
272	else
273		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
274
275	drm_WARN_ON_ONCE(&i915->drm,
276			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
277			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
278
279	return voltage_max;
280}
281
282static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
283				   enum drm_dp_phy dp_phy)
284{
285	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
286	u8 preemph_max;
287
288	/*
289	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
290	 * the DPRX_PHY we train.
291	 */
292	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
293		preemph_max = intel_dp->preemph_max(intel_dp);
294	else
295		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
296
297	drm_WARN_ON_ONCE(&i915->drm,
298			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
299			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
300
301	return preemph_max;
302}
303
304void
305intel_dp_get_adjust_train(struct intel_dp *intel_dp,
306			  const struct intel_crtc_state *crtc_state,
307			  enum drm_dp_phy dp_phy,
308			  const u8 link_status[DP_LINK_STATUS_SIZE])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
309{
310	u8 v = 0;
311	u8 p = 0;
312	int lane;
313	u8 voltage_max;
314	u8 preemph_max;
315
316	for (lane = 0; lane < crtc_state->lane_count; lane++) {
317		v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
318		p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
 
 
 
 
 
 
 
319	}
320
321	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
322	if (p >= preemph_max)
323		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
324
325	v = min(v, dp_voltage_max(p));
326
327	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
328	if (v >= voltage_max)
329		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
330
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331	for (lane = 0; lane < 4; lane++)
332		intel_dp->train_set[lane] = v | p;
 
 
333}
334
335static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
336					     enum drm_dp_phy dp_phy)
337{
338	return dp_phy == DP_PHY_DPRX ?
339		DP_TRAINING_PATTERN_SET :
340		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
341}
342
343static bool
344intel_dp_set_link_train(struct intel_dp *intel_dp,
345			const struct intel_crtc_state *crtc_state,
346			enum drm_dp_phy dp_phy,
347			u8 dp_train_pat)
348{
349	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
350	u8 buf[sizeof(intel_dp->train_set) + 1];
351	int len;
352
353	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
354					       dp_train_pat);
355
356	buf[0] = dp_train_pat;
357	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
358	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
359	len = crtc_state->lane_count + 1;
360
361	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
362}
363
364static char dp_training_pattern_name(u8 train_pat)
365{
366	switch (train_pat) {
367	case DP_TRAINING_PATTERN_1:
368	case DP_TRAINING_PATTERN_2:
369	case DP_TRAINING_PATTERN_3:
370		return '0' + train_pat;
371	case DP_TRAINING_PATTERN_4:
372		return '4';
373	default:
374		MISSING_CASE(train_pat);
375		return '?';
376	}
377}
378
379void
380intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
381				       const struct intel_crtc_state *crtc_state,
 
382				       u8 dp_train_pat)
383{
384	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
385	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
386	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
387
388	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
389		drm_dbg_kms(&dev_priv->drm,
390			    "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
391			    encoder->base.base.id, encoder->base.name,
392			    dp_training_pattern_name(train_pat));
393
394	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
395}
396
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
398				const struct intel_crtc_state *crtc_state,
399				enum drm_dp_phy dp_phy)
400{
401	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
402	u8 train_set = intel_dp->train_set[0];
403	char phy_name[10];
404
405	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
406		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
407		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
408		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
409		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
410		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
411		    " (max)" : "",
412		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
 
 
 
 
 
413
414	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
415		intel_dp->set_signal_levels(intel_dp, crtc_state);
416}
417
418static bool
419intel_dp_reset_link_train(struct intel_dp *intel_dp,
420			  const struct intel_crtc_state *crtc_state,
421			  enum drm_dp_phy dp_phy,
422			  u8 dp_train_pat)
423{
424	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
425	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
426	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
427}
428
429static bool
430intel_dp_update_link_train(struct intel_dp *intel_dp,
431			   const struct intel_crtc_state *crtc_state,
432			   enum drm_dp_phy dp_phy)
433{
434	int reg = dp_phy == DP_PHY_DPRX ?
435			    DP_TRAINING_LANE0_SET :
436			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
437	int ret;
438
439	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
440
441	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
442				intel_dp->train_set, crtc_state->lane_count);
443
444	return ret == crtc_state->lane_count;
445}
446
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
447static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
448					     const struct intel_crtc_state *crtc_state)
449{
450	int lane;
451
452	for (lane = 0; lane < crtc_state->lane_count; lane++)
453		if ((intel_dp->train_set[lane] &
454		     DP_TRAIN_MAX_SWING_REACHED) == 0)
455			return false;
 
 
 
 
 
 
 
456
457	return true;
458}
459
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
460/*
461 * Prepare link training by configuring the link parameters. On DDI platforms
462 * also enable the port here.
463 */
464static bool
465intel_dp_prepare_link_train(struct intel_dp *intel_dp,
466			    const struct intel_crtc_state *crtc_state)
467{
468	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
469	u8 link_config[2];
470	u8 link_bw, rate_select;
471
472	if (intel_dp->prepare_link_retrain)
473		intel_dp->prepare_link_retrain(intel_dp, crtc_state);
474
475	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
476			      &link_bw, &rate_select);
477
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
478	if (link_bw)
479		drm_dbg_kms(&i915->drm,
480			    "Using LINK_BW_SET value %02x\n", link_bw);
481	else
482		drm_dbg_kms(&i915->drm,
483			    "Using LINK_RATE_SET value %02x\n", rate_select);
 
 
 
 
 
 
 
 
484
485	/* Write the link configuration data */
486	link_config[0] = link_bw;
487	link_config[1] = crtc_state->lane_count;
488	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
489		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
490	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
491
492	/* eDP 1.4 rate select method. */
493	if (!link_bw)
494		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
495				  &rate_select, 1);
496
497	link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
498	link_config[1] = DP_SET_ANSI_8B10B;
499	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 
 
500
501	intel_dp->DP |= DP_PORT_EN;
 
502
503	return true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
504}
505
506static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
507							enum drm_dp_phy dp_phy)
 
508{
509	if (dp_phy == DP_PHY_DPRX)
510		drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
511	else
512		drm_dp_lttpr_link_train_clock_recovery_delay();
513}
514
515/*
516 * Perform the link training clock recovery phase on the given DP PHY using
517 * training pattern 1.
518 */
519static bool
520intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
521				      const struct intel_crtc_state *crtc_state,
522				      enum drm_dp_phy dp_phy)
523{
524	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
525	u8 voltage;
526	int voltage_tries, cr_tries, max_cr_tries;
 
527	bool max_vswing_reached = false;
 
 
 
 
 
528
529	/* clock recovery */
530	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
531				       DP_TRAINING_PATTERN_1 |
532				       DP_LINK_SCRAMBLING_DISABLE)) {
533		drm_err(&i915->drm, "failed to enable link training\n");
534		return false;
535	}
536
537	/*
538	 * The DP 1.4 spec defines the max clock recovery retries value
539	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
540	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
541	 * x 5 identical voltage retries). Since the previous specs didn't
542	 * define a limit and created the possibility of an infinite loop
543	 * we want to prevent any sync from triggering that corner case.
544	 */
545	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
546		max_cr_tries = 10;
547	else
548		max_cr_tries = 80;
549
550	voltage_tries = 1;
551	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
552		u8 link_status[DP_LINK_STATUS_SIZE];
553
554		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);
555
556		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
557						     link_status) < 0) {
558			drm_err(&i915->drm, "failed to get link status\n");
559			return false;
560		}
561
562		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
563			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
564			return true;
565		}
566
567		if (voltage_tries == 5) {
568			drm_dbg_kms(&i915->drm,
569				    "Same voltage tried 5 times\n");
570			return false;
571		}
572
573		if (max_vswing_reached) {
574			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
 
575			return false;
576		}
577
578		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
579
580		/* Update training set as requested by target */
581		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
582					  link_status);
583		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
584			drm_err(&i915->drm,
585				"failed to update link training\n");
586			return false;
587		}
588
589		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
590		    voltage)
591			++voltage_tries;
592		else
593			voltage_tries = 1;
594
 
 
595		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
596			max_vswing_reached = true;
597
598	}
599	drm_err(&i915->drm,
600		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
 
 
 
601	return false;
602}
603
604/*
605 * Pick training pattern for channel equalization. Training pattern 4 for HBR3
606 * or for 1.4 devices that support it, training Pattern 3 for HBR2
607 * or 1.2 devices that support it, Training Pattern 2 otherwise.
608 */
609static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
610				     const struct intel_crtc_state *crtc_state,
611				     enum drm_dp_phy dp_phy)
612{
 
613	bool source_tps3, sink_tps3, source_tps4, sink_tps4;
614
 
 
 
 
615	/*
616	 * Intel platforms that support HBR3 also support TPS4. It is mandatory
617	 * for all downstream devices that support HBR3. There are no known eDP
618	 * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
619	 * specification.
620	 * LTTPRs must support TPS4.
621	 */
622	source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
623	sink_tps4 = dp_phy != DP_PHY_DPRX ||
624		    drm_dp_tps4_supported(intel_dp->dpcd);
625	if (source_tps4 && sink_tps4) {
626		return DP_TRAINING_PATTERN_4;
627	} else if (crtc_state->port_clock == 810000) {
628		if (!source_tps4)
629			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
630				    "8.1 Gbps link rate without source HBR3/TPS4 support\n");
631		if (!sink_tps4)
632			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
633				    "8.1 Gbps link rate without sink TPS4 support\n");
634	}
 
635	/*
636	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
637	 * also mandatory for downstream devices that support HBR2. However, not
638	 * all sinks follow the spec.
639	 */
640	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
641	sink_tps3 = dp_phy != DP_PHY_DPRX ||
642		    drm_dp_tps3_supported(intel_dp->dpcd);
643	if (source_tps3 && sink_tps3) {
644		return  DP_TRAINING_PATTERN_3;
645	} else if (crtc_state->port_clock >= 540000) {
646		if (!source_tps3)
647			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
648				    ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
649		if (!sink_tps3)
650			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
651				    ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
652	}
653
654	return DP_TRAINING_PATTERN_2;
655}
656
657static void
658intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
659						  enum drm_dp_phy dp_phy)
660{
661	if (dp_phy == DP_PHY_DPRX) {
662		drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
663	} else {
664		const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
665
666		drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
667	}
668}
669
670/*
671 * Perform the link training channel equalization phase on the given DP PHY
672 * using one of training pattern 2, 3 or 4 depending on the source and
673 * sink capabilities.
674 */
675static bool
676intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
677					    const struct intel_crtc_state *crtc_state,
678					    enum drm_dp_phy dp_phy)
679{
680	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
681	int tries;
682	u32 training_pattern;
683	u8 link_status[DP_LINK_STATUS_SIZE];
684	bool channel_eq = false;
 
 
 
 
 
685
686	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
687	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
688	if (training_pattern != DP_TRAINING_PATTERN_4)
689		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
690
691	/* channel equalization */
692	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
693				     training_pattern)) {
694		drm_err(&i915->drm, "failed to start channel equalization\n");
695		return false;
696	}
697
698	for (tries = 0; tries < 5; tries++) {
699		intel_dp_link_training_channel_equalization_delay(intel_dp,
700								  dp_phy);
701		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
702						     link_status) < 0) {
703			drm_err(&i915->drm,
704				"failed to get link status\n");
705			break;
706		}
707
708		/* Make sure clock is still ok */
709		if (!drm_dp_clock_recovery_ok(link_status,
710					      crtc_state->lane_count)) {
711			intel_dp_dump_link_status(&i915->drm, link_status);
712			drm_dbg_kms(&i915->drm,
713				    "Clock recovery check failed, cannot "
714				    "continue channel equalization\n");
715			break;
716		}
717
718		if (drm_dp_channel_eq_ok(link_status,
719					 crtc_state->lane_count)) {
720			channel_eq = true;
721			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
722				    "successful\n");
723			break;
724		}
725
726		/* Update training set as requested by target */
727		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
728					  link_status);
729		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
730			drm_err(&i915->drm,
731				"failed to update link training\n");
732			break;
733		}
734	}
735
736	/* Try 5 times, else fail and try at lower BW */
737	if (tries == 5) {
738		intel_dp_dump_link_status(&i915->drm, link_status);
739		drm_dbg_kms(&i915->drm,
740			    "Channel equalization failed 5 times\n");
741	}
742
743	return channel_eq;
744}
745
746static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
747						   enum drm_dp_phy dp_phy)
748{
749	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
750	u8 val = DP_TRAINING_PATTERN_DISABLE;
751
752	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
753}
754
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
755/**
756 * intel_dp_stop_link_train - stop link training
757 * @intel_dp: DP struct
758 * @crtc_state: state for CRTC attached to the encoder
759 *
760 * Stop the link training of the @intel_dp port, disabling the training
761 * pattern in the sink's DPCD, and disabling the test pattern symbol
762 * generation on the port.
763 *
764 * What symbols are output on the port after this point is
765 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
766 * with the pipe being disabled, on older platforms it's HW specific if/how an
767 * idle pattern is generated, as the pipe is already enabled here for those.
768 *
769 * This function must be called after intel_dp_start_link_train().
770 */
771void intel_dp_stop_link_train(struct intel_dp *intel_dp,
772			      const struct intel_crtc_state *crtc_state)
773{
774	intel_dp->link_trained = true;
775
776	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
777	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
778					       DP_TRAINING_PATTERN_DISABLE);
 
 
 
 
 
779}
780
781static bool
782intel_dp_link_train_phy(struct intel_dp *intel_dp,
783			const struct intel_crtc_state *crtc_state,
784			enum drm_dp_phy dp_phy)
785{
786	struct intel_connector *intel_connector = intel_dp->attached_connector;
787	char phy_name[10];
788	bool ret = false;
789
790	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
791		goto out;
792
793	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
794		goto out;
795
796	ret = true;
797
798out:
799	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
800		    "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
801		    intel_connector->base.base.id,
802		    intel_connector->base.name,
803		    ret ? "passed" : "failed",
804		    crtc_state->port_clock, crtc_state->lane_count,
805		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
806
807	return ret;
808}
809
810static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
811						     const struct intel_crtc_state *crtc_state)
812{
813	struct intel_connector *intel_connector = intel_dp->attached_connector;
 
 
 
 
 
 
814
815	if (intel_dp->hobl_active) {
816		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
817			    "Link Training failed with HOBL active, not enabling it from now on");
818		intel_dp->hobl_failed = true;
819	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
820							   crtc_state->port_clock,
821							   crtc_state->lane_count)) {
822		return;
823	}
824
825	/* Schedule a Hotplug Uevent to userspace to start modeset */
826	schedule_work(&intel_connector->modeset_retry_work);
827}
828
829/* Perform the link training on all LTTPRs and the DPRX on a link. */
830static bool
831intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
832			     const struct intel_crtc_state *crtc_state,
833			     int lttpr_count)
834{
835	bool ret = true;
836	int i;
837
838	intel_dp_prepare_link_train(intel_dp, crtc_state);
839
840	for (i = lttpr_count - 1; i >= 0; i--) {
841		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
842
843		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
844		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
845
846		if (!ret)
847			break;
848	}
849
850	if (ret)
851		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
852
853	if (intel_dp->set_idle_link_train)
854		intel_dp->set_idle_link_train(intel_dp, crtc_state);
855
856	return ret;
857}
858
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/**
 * intel_dp_start_link_train - start link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	/*
	 * TODO: Reiniting LTTPRs here won't be needed once proper connector
	 * HW state readout is added.
	 */
	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		/* Still continue with enabling the port and link training. */
		lttpr_count = 0;

	if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}
v6.8
   1/*
   2 * Copyright © 2008-2015 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 */
  23
  24#include "i915_drv.h"
  25#include "intel_display_types.h"
  26#include "intel_dp.h"
  27#include "intel_dp_link_training.h"
  28
/* Common prefix for all link-training log messages: connector/encoder/PHY. */
#define LT_MSG_PREFIX			"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] "
#define LT_MSG_ARGS(_intel_dp, _dp_phy)	(_intel_dp)->attached_connector->base.base.id, \
					(_intel_dp)->attached_connector->base.name, \
					dp_to_dig_port(_intel_dp)->base.base.base.id, \
					dp_to_dig_port(_intel_dp)->base.base.name, \
					drm_dp_phy_name(_dp_phy)

/* Debug-level link-training message, tagged with the port and DP PHY. */
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
	drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \
		    LT_MSG_PREFIX _format, \
		    LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)

/*
 * Error-level link-training message. Failures on a disconnected port are
 * expected (the sink was unplugged mid-training), so they are demoted to
 * debug level instead of spamming the kernel log with errors.
 */
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
	if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
		drm_err(&dp_to_i915(_intel_dp)->drm, \
			LT_MSG_PREFIX _format, \
			LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
	else \
		lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)
  49
/* Forget all previously cached LTTPR common capabilities. */
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}
  54
/*
 * Clear only the PHY repeater count field in the cached common caps, making
 * the rest of the code treat the link as having no (usable) LTTPRs while
 * keeping the other cached common capability fields intact.
 */
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	/* The caps buffer is indexed relative to the first common-cap DPCD address. */
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}
  60
 
 
 
 
 
 
 
 
 
 
 
/* Return the cached per-PHY capability buffer for the given LTTPR PHY. */
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	/* lttpr_phy_caps[] is indexed from 0, dp_phy from DP_PHY_LTTPR1. */
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}
  66
  67static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
  68					 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
  69					 enum drm_dp_phy dp_phy)
  70{
  71	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
 
 
 
  72
  73	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
  74		lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n");
 
 
  75		return;
  76	}
  77
  78	lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n",
  79	       (int)sizeof(intel_dp->lttpr_phy_caps[0]),
  80	       phy_caps);
 
 
  81}
  82
  83static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
  84					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
  85{
  86	int ret;
 
 
 
 
 
 
 
 
 
 
  87
  88	ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
  89					    intel_dp->lttpr_common_caps);
  90	if (ret < 0)
  91		goto reset_caps;
  92
  93	lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n",
  94	       (int)sizeof(intel_dp->lttpr_common_caps),
  95	       intel_dp->lttpr_common_caps);
 
  96
  97	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
  98	if (intel_dp->lttpr_common_caps[0] < 0x14)
  99		goto reset_caps;
 100
 101	return true;
 102
 103reset_caps:
 104	intel_dp_reset_lttpr_common_caps(intel_dp);
 105	return false;
 106}
 107
 108static bool
 109intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
 110{
 111	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
 112			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
 113
 114	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
 115}
 116
/*
 * Detect LTTPRs on the link, switch them to non-transparent link training
 * mode if possible and cache each detected LTTPR's PHY capabilities.
 *
 * Returns the number of detected LTTPRs to be trained individually, or 0
 * when there are none or the link stays in transparent mode.
 */
static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;
	int i;

	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
		return 0;

	/* Can be negative if the reported repeater count field is invalid. */
	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));

	return lttpr_count;
}
 164
/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
		u8 dpcd[DP_RECEIVER_CAP_SIZE];

		/* Verify AUX access at the LTTPR caps offset before the full reads. */
		if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
			return -EIO;

		if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
			return -EIO;

		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
	}

	/*
	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
	 * it here.
	 */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
 216
 217static u8 dp_voltage_max(u8 preemph)
 218{
 219	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
 220	case DP_TRAIN_PRE_EMPH_LEVEL_0:
 221		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 222	case DP_TRAIN_PRE_EMPH_LEVEL_1:
 223		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 224	case DP_TRAIN_PRE_EMPH_LEVEL_2:
 225		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
 226	case DP_TRAIN_PRE_EMPH_LEVEL_3:
 227	default:
 228		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
 229	}
 230}
 231
 232static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
 233				     enum drm_dp_phy dp_phy)
 234{
 235	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
 236
 237	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
 238		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
 239	else
 240		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
 241}
 242
 243static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
 244				     enum drm_dp_phy dp_phy)
 245{
 246	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
 247
 248	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
 249		return DP_TRAIN_PRE_EMPH_LEVEL_3;
 250	else
 251		return DP_TRAIN_PRE_EMPH_LEVEL_2;
 252}
 253
/*
 * Return whether the receiving end at @dp_phy is driven directly by the
 * source DPTX, i.e. there is no LTTPR between the source and @dp_phy.
 * True if no LTTPRs were detected, or if @dp_phy is the highest-indexed
 * LTTPR (the PHY upstream of @dp_phy is @dp_phy + 1, so indices grow
 * toward the source).
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	/* With no LTTPRs only the DPRX itself may be trained. */
	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
 265
 266static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
 267				   const struct intel_crtc_state *crtc_state,
 268				   enum drm_dp_phy dp_phy)
 269{
 270	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 271	u8 voltage_max;
 272
 273	/*
 274	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
 275	 * the DPRX_PHY we train.
 276	 */
 277	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
 278		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
 279	else
 280		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
 281
 282	drm_WARN_ON_ONCE(&i915->drm,
 283			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
 284			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
 285
 286	return voltage_max;
 287}
 288
 289static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
 290				   enum drm_dp_phy dp_phy)
 291{
 292	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 293	u8 preemph_max;
 294
 295	/*
 296	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
 297	 * the DPRX_PHY we train.
 298	 */
 299	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
 300		preemph_max = intel_dp->preemph_max(intel_dp);
 301	else
 302		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
 303
 304	drm_WARN_ON_ONCE(&i915->drm,
 305			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
 306			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
 307
 308	return preemph_max;
 309}
 310
 311static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
 312				       enum drm_dp_phy dp_phy)
 313{
 314	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 315
 316	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
 317		DISPLAY_VER(i915) >= 11;
 318}
 319
 320/* 128b/132b */
 321static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
 322						 const struct intel_crtc_state *crtc_state,
 323						 enum drm_dp_phy dp_phy,
 324						 const u8 link_status[DP_LINK_STATUS_SIZE],
 325						 int lane)
 326{
 327	u8 tx_ffe = 0;
 328
 329	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
 330		lane = min(lane, crtc_state->lane_count - 1);
 331		tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
 332	} else {
 333		for (lane = 0; lane < crtc_state->lane_count; lane++)
 334			tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
 335	}
 336
 337	return tx_ffe;
 338}
 339
/*
 * 8b/10b: pick the voltage swing / pre-emphasis value to program for @lane,
 * based on the sink's adjust requests in @link_status, clamped to the
 * limits of the transmitting PHY. Returns the two levels OR'd together in
 * DP_TRAINING_LANEx_SET register layout, with the MAX_*_REACHED flags set
 * when a limit was hit.
 */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		/* Clamp so entries beyond lane_count mirror the last active lane. */
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		/* One common level: use the largest request across all lanes. */
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	/* Clamp pre-emphasis first: it constrains the usable voltage swing. */
	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* vswing + preemph levels may not exceed their combined limit. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}
 376
 377static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
 378					 const struct intel_crtc_state *crtc_state,
 379					 enum drm_dp_phy dp_phy,
 380					 const u8 link_status[DP_LINK_STATUS_SIZE],
 381					 int lane)
 382{
 383	if (intel_dp_is_uhbr(crtc_state))
 384		return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
 385							      dp_phy, link_status, lane);
 386	else
 387		return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
 388							       dp_phy, link_status, lane);
 389}
 390
/* Helpers to log the sink's per-lane adjust requests, one value per lane. */
#define TRAIN_REQ_FMT "%d/%d/%d/%d"
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
 413
/*
 * Compute the new train_set[] drive settings for all lanes from the sink's
 * adjust requests in @link_status, logging the raw requests first.
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_TX_FFE_ARGS(link_status));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing request: " TRAIN_REQ_FMT ", "
		       "pre-emphasis request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_VSWING_ARGS(link_status),
		       TRAIN_REQ_PREEMPH_ARGS(link_status));
	}

	/*
	 * Always fill all 4 entries; the lane-adjust helpers clamp the lane
	 * index, so entries beyond lane_count repeat the last active lane.
	 */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] =
			intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
						       dp_phy, link_status, lane);
}
 443
 444static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
 445					     enum drm_dp_phy dp_phy)
 446{
 447	return dp_phy == DP_PHY_DPRX ?
 448		DP_TRAINING_PATTERN_SET :
 449		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
 450}
 451
 452static bool
 453intel_dp_set_link_train(struct intel_dp *intel_dp,
 454			const struct intel_crtc_state *crtc_state,
 455			enum drm_dp_phy dp_phy,
 456			u8 dp_train_pat)
 457{
 458	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
 459	u8 buf[sizeof(intel_dp->train_set) + 1];
 460	int len;
 461
 462	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
 463					       dp_phy, dp_train_pat);
 464
 465	buf[0] = dp_train_pat;
 466	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
 467	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
 468	len = crtc_state->lane_count + 1;
 469
 470	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
 471}
 472
 473static char dp_training_pattern_name(u8 train_pat)
 474{
 475	switch (train_pat) {
 476	case DP_TRAINING_PATTERN_1:
 477	case DP_TRAINING_PATTERN_2:
 478	case DP_TRAINING_PATTERN_3:
 479		return '0' + train_pat;
 480	case DP_TRAINING_PATTERN_4:
 481		return '4';
 482	default:
 483		MISSING_CASE(train_pat);
 484		return '?';
 485	}
 486}
 487
/**
 * intel_dp_program_link_training_pattern - program the given LT pattern on the source
 * @intel_dp: Intel DP struct
 * @crtc_state: state for CRTC attached to the encoder
 * @dp_phy: the DP PHY being trained (used only for logging here)
 * @dp_train_pat: training pattern (and flags) to program
 *
 * Program the source's training pattern via the platform-specific
 * set_link_train() hook, logging the pattern unless it's being disabled.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       enum drm_dp_phy dp_phy,
				       u8 dp_train_pat)
{
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n",
		       dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
 502
/*
 * Helpers to log the programmed per-lane drive settings, one value per
 * lane, with a "(max)" suffix when the corresponding limit was reached.
 */
#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
#define TRAIN_SET_VSWING_ARGS(train_set) \
	_TRAIN_SET_VSWING_ARGS((train_set)[0]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[1]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[2]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[3])
#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
	((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
	(train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
#define TRAIN_SET_PREEMPH_ARGS(train_set) \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[3])
/* TX FFE presets have no "max reached" flag, hence the empty suffix. */
#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
	((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
#define TRAIN_SET_TX_FFE_ARGS(train_set) \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[3])
 527
/*
 * Log the current train_set[] drive settings and program them on the
 * source encoder. The encoder is only programmed when the source itself
 * transmits toward @dp_phy; when an LTTPR is the transmitter, that LTTPR's
 * settings are programmed via DPCD by the caller instead.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE presets: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing levels: " TRAIN_SET_FMT ", "
		       "pre-emphasis levels: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
		       TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
	}

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		encoder->set_signal_levels(encoder, crtc_state);
}
 553
/*
 * Reset all lanes' drive settings to level 0, program those levels on the
 * source and start the given training pattern. Returns whether programming
 * the pattern on the sink/LTTPR succeeded.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
 564
 565static bool
 566intel_dp_update_link_train(struct intel_dp *intel_dp,
 567			   const struct intel_crtc_state *crtc_state,
 568			   enum drm_dp_phy dp_phy)
 569{
 570	int reg = dp_phy == DP_PHY_DPRX ?
 571			    DP_TRAINING_LANE0_SET :
 572			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
 573	int ret;
 574
 575	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
 576
 577	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
 578				intel_dp->train_set, crtc_state->lane_count);
 579
 580	return ret == crtc_state->lane_count;
 581}
 582
 583/* 128b/132b */
 584static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
 585{
 586	return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
 587		DP_TX_FFE_PRESET_VALUE_MASK;
 588}
 589
 590/*
 591 * 8b/10b
 592 *
 593 * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
 594 * have self contradicting tests around this area.
 595 *
 596 * In lieu of better ideas let's just stop when we've reached the max supported
 597 * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
 598 * whether vswing level 3 is supported or not.
 599 */
 600static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
 601{
 602	u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
 603		DP_TRAIN_VOLTAGE_SWING_SHIFT;
 604	u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
 605		DP_TRAIN_PRE_EMPHASIS_SHIFT;
 606
 607	if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
 608		return false;
 609
 610	if (v + p != 3)
 611		return false;
 612
 613	return true;
 614}
 615
 616static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
 617					     const struct intel_crtc_state *crtc_state)
 618{
 619	int lane;
 620
 621	for (lane = 0; lane < crtc_state->lane_count; lane++) {
 622		u8 train_set_lane = intel_dp->train_set[lane];
 623
 624		if (intel_dp_is_uhbr(crtc_state)) {
 625			if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
 626				return false;
 627		} else {
 628			if (!intel_dp_lane_max_vswing_reached(train_set_lane))
 629				return false;
 630		}
 631	}
 632
 633	return true;
 634}
 635
 636static void
 637intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
 638				const struct intel_crtc_state *crtc_state)
 639{
 640	u8 link_config[2];
 641
 642	link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
 643	link_config[1] = intel_dp_is_uhbr(crtc_state) ?
 644			 DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
 645	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
 646}
 647
/*
 * Program the link rate and lane count on the sink, using either the
 * classic DP_LINK_BW_SET method (@link_bw non-zero) or the eDP v1.4+
 * DP_LINK_RATE_SET index method (@link_bw zero, @rate_select valid).
 */
static void
intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    u8 link_bw, u8 rate_select)
{
	u8 lane_count = crtc_state->lane_count;

	if (crtc_state->enhanced_framing)
		lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	if (link_bw) {
		/* DP and eDP v1.3 and earlier link bw set method. */
		u8 link_config[] = { link_bw, lane_count };

		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
				  ARRAY_SIZE(link_config));
	} else {
		/*
		 * eDP v1.4 and later link rate set method.
		 *
		 * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
		 * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
		 *
		 * eDP v1.5 sinks allow choosing either, and the last choice
		 * shall be active.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
	}
}
 678
/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	/* link_bw == 0 selects the eDP v1.4+ LINK_RATE_SET method below. */
	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/*
	 * WaEdpLinkRateDataReload
	 *
	 * Parade PS8461E MUX (used on various TGL+ laptops) needs
	 * to snoop the link rates reported by the sink when we
	 * use LINK_RATE_SET in order to operate in jitter cleaning
	 * mode (as opposed to redriver mode). Unfortunately it
	 * loses track of the snooped link rates when powered down,
	 * so we need to make it re-snoop often. Without this high
	 * link rates are not stable.
	 */
	if (!link_bw) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n");

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));
	}

	if (link_bw)
		lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n",
		       link_bw);
	else
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Using LINK_RATE_SET value %02x\n",
		       rate_select);
	/*
	 * Spec DP2.1 Section 3.5.2.16
	 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate
	 */
	intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
	intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
				    rate_select);

	return true;
}
 
 
 
 
 
 
 
 
 
 732
 733static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
 734					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
 735					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
 736{
 737	int lane;
 738
 739	for (lane = 0; lane < crtc_state->lane_count; lane++) {
 740		u8 old, new;
 741
 742		if (intel_dp_is_uhbr(crtc_state)) {
 743			old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
 744			new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
 745		} else {
 746			old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
 747				drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
 748			new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
 749				drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
 750		}
 751
 752		if (old != new)
 753			return true;
 754	}
 755
 756	return false;
 757}
 758
/* Dump the raw DPCD link status registers to the debug log. */
void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	lt_dbg(intel_dp, dp_phy,
	       "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
	       link_status[0], link_status[1], link_status[2],
	       link_status[3], link_status[4], link_status[5]);
}
 768
/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 *
 * Loops reading the PHY's link status and updating the drive settings as
 * requested, until clock recovery succeeds or one of the give-up conditions
 * is hit: the same voltage retried 5 times, the max drive settings reached,
 * or the overall retry limit exhausted.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	int delay_us;

	delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
						    intel_dp->dpcd, dp_phy,
						    intel_dp_is_uhbr(crtc_state));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		lt_err(intel_dp, dp_phy, "Failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sink from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		/* Give the sink the spec-mandated time to lock. */
		usleep_range(delay_us, 2 * delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n");
			return true;
		}

		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n");
			return false;
		}

		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			return false;
		}

		/* Count consecutive iterations with unchanged adjust requests. */
		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		/* Checked at the top of the next iteration, after the settle delay. */
		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n",
	       max_cr_tries);

	return false;
}
 861
/*
 * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
 * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
 * 1.2 devices that support it, TPS2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/* UHBR+ use separate 128b/132b TPS2 */
	if (intel_dp_is_uhbr(crtc_state))
		return DP_TRAINING_PATTERN_2;

	/*
	 * TPS4 support is mandatory for all downstream devices that
	 * support HBR3. There are no known eDP panels that support
	 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
	 * LTTPRs must support TPS4 (hence no DPCD check for non-DPRX PHYs).
	 */
	source_tps4 = intel_dp_source_supports_tps4(i915);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		/* HBR3 without TPS4 shouldn't happen; note which side lacks it. */
		if (!source_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without source TPS4 support\n");
		if (!sink_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without sink TPS4 support\n");
	}

	/*
	 * TPS3 support is mandatory for downstream devices that
	 * support HBR2. However, not all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_tps3(i915);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return  DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
		if (!sink_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}
 918
 
 
 
 
 
 
 
 
 
 
 
 
 
 919/*
 920 * Perform the link training channel equalization phase on the given DP PHY
 921 * using one of training pattern 2, 3 or 4 depending on the source and
 922 * sink capabilities.
 923 */
 924static bool
 925intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
 926					    const struct intel_crtc_state *crtc_state,
 927					    enum drm_dp_phy dp_phy)
 928{
 
 929	int tries;
 930	u32 training_pattern;
 931	u8 link_status[DP_LINK_STATUS_SIZE];
 932	bool channel_eq = false;
 933	int delay_us;
 934
 935	delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
 936						intel_dp->dpcd, dp_phy,
 937						intel_dp_is_uhbr(crtc_state));
 938
 939	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
 940	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
 941	if (training_pattern != DP_TRAINING_PATTERN_4)
 942		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
 943
 944	/* channel equalization */
 945	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
 946				     training_pattern)) {
 947		lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n");
 948		return false;
 949	}
 950
 951	for (tries = 0; tries < 5; tries++) {
 952		usleep_range(delay_us, 2 * delay_us);
 953
 954		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
 955						     link_status) < 0) {
 956			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
 
 957			break;
 958		}
 959
 960		/* Make sure clock is still ok */
 961		if (!drm_dp_clock_recovery_ok(link_status,
 962					      crtc_state->lane_count)) {
 963			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
 964			lt_dbg(intel_dp, dp_phy,
 965			       "Clock recovery check failed, cannot continue channel equalization\n");
 
 966			break;
 967		}
 968
 969		if (drm_dp_channel_eq_ok(link_status,
 970					 crtc_state->lane_count)) {
 971			channel_eq = true;
 972			lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n");
 
 973			break;
 974		}
 975
 976		/* Update training set as requested by target */
 977		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
 978					  link_status);
 979		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
 980			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
 
 981			break;
 982		}
 983	}
 984
 985	/* Try 5 times, else fail and try at lower BW */
 986	if (tries == 5) {
 987		intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
 988		lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n");
 
 989	}
 990
 991	return channel_eq;
 992}
 993
 994static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
 995						   enum drm_dp_phy dp_phy)
 996{
 997	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
 998	u8 val = DP_TRAINING_PATTERN_DISABLE;
 999
1000	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
1001}
1002
1003static int
1004intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
1005			    const struct intel_crtc_state *crtc_state)
1006{
1007	u8 sink_status;
1008	int ret;
1009
1010	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
1011	if (ret != 1) {
1012		lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n");
1013		return ret < 0 ? ret : -EIO;
1014	}
1015
1016	return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
1017}
1018
1019/**
1020 * intel_dp_stop_link_train - stop link training
1021 * @intel_dp: DP struct
1022 * @crtc_state: state for CRTC attached to the encoder
1023 *
1024 * Stop the link training of the @intel_dp port, disabling the training
1025 * pattern in the sink's DPCD, and disabling the test pattern symbol
1026 * generation on the port.
1027 *
1028 * What symbols are output on the port after this point is
1029 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
1030 * with the pipe being disabled, on older platforms it's HW specific if/how an
1031 * idle pattern is generated, as the pipe is already enabled here for those.
1032 *
1033 * This function must be called after intel_dp_start_link_train().
1034 */
1035void intel_dp_stop_link_train(struct intel_dp *intel_dp,
1036			      const struct intel_crtc_state *crtc_state)
1037{
1038	intel_dp->link_trained = true;
1039
1040	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
1041	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
1042					       DP_TRAINING_PATTERN_DISABLE);
1043
1044	if (intel_dp_is_uhbr(crtc_state) &&
1045	    wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
1046		lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
1047	}
1048}
1049
1050static bool
1051intel_dp_link_train_phy(struct intel_dp *intel_dp,
1052			const struct intel_crtc_state *crtc_state,
1053			enum drm_dp_phy dp_phy)
1054{
 
 
1055	bool ret = false;
1056
1057	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
1058		goto out;
1059
1060	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
1061		goto out;
1062
1063	ret = true;
1064
1065out:
1066	lt_dbg(intel_dp, dp_phy,
1067	       "Link Training %s at link rate = %d, lane count = %d\n",
1068	       ret ? "passed" : "failed",
1069	       crtc_state->port_clock, crtc_state->lane_count);
 
 
 
1070
1071	return ret;
1072}
1073
1074static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
1075						     const struct intel_crtc_state *crtc_state)
1076{
1077	struct intel_connector *intel_connector = intel_dp->attached_connector;
1078	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1079
1080	if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
1081		lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
1082		return;
1083	}
1084
1085	if (intel_dp->hobl_active) {
1086		lt_dbg(intel_dp, DP_PHY_DPRX,
1087		       "Link Training failed with HOBL active, not enabling it from now on\n");
1088		intel_dp->hobl_failed = true;
1089	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
1090							   crtc_state->port_clock,
1091							   crtc_state->lane_count)) {
1092		return;
1093	}
1094
1095	/* Schedule a Hotplug Uevent to userspace to start modeset */
1096	queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
1097}
1098
1099/* Perform the link training on all LTTPRs and the DPRX on a link. */
1100static bool
1101intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
1102			     const struct intel_crtc_state *crtc_state,
1103			     int lttpr_count)
1104{
1105	bool ret = true;
1106	int i;
1107
 
 
1108	for (i = lttpr_count - 1; i >= 0; i--) {
1109		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
1110
1111		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
1112		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
1113
1114		if (!ret)
1115			break;
1116	}
1117
1118	if (ret)
1119		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
1120
1121	if (intel_dp->set_idle_link_train)
1122		intel_dp->set_idle_link_train(intel_dp, crtc_state);
1123
1124	return ret;
1125}
1126
1127/*
1128 * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
1129 */
1130static bool
1131intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
1132			  const struct intel_crtc_state *crtc_state)
1133{
1134	u8 link_status[DP_LINK_STATUS_SIZE];
1135	int delay_us;
1136	int try, max_tries = 20;
1137	unsigned long deadline;
1138	bool timeout = false;
1139
1140	/*
1141	 * Reset signal levels. Start transmitting 128b/132b TPS1.
1142	 *
1143	 * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
1144	 * in DP_TRAINING_PATTERN_SET.
1145	 */
1146	if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
1147				       DP_TRAINING_PATTERN_1)) {
1148		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n");
1149		return false;
1150	}
1151
1152	delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
1153
1154	/* Read the initial TX FFE settings. */
1155	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1156		lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n");
1157		return false;
1158	}
1159
1160	/* Update signal levels and training set as requested. */
1161	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
1162	if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
1163		lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n");
1164		return false;
1165	}
1166
1167	/* Start transmitting 128b/132b TPS2. */
1168	if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
1169				     DP_TRAINING_PATTERN_2)) {
1170		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n");
1171		return false;
1172	}
1173
1174	/* Time budget for the LANEx_EQ_DONE Sequence */
1175	deadline = jiffies + msecs_to_jiffies_timeout(400);
1176
1177	for (try = 0; try < max_tries; try++) {
1178		usleep_range(delay_us, 2 * delay_us);
1179
1180		/*
1181		 * The delay may get updated. The transmitter shall read the
1182		 * delay before link status during link training.
1183		 */
1184		delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);
1185
1186		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1187			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
1188			return false;
1189		}
1190
1191		if (drm_dp_128b132b_link_training_failed(link_status)) {
1192			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1193			lt_err(intel_dp, DP_PHY_DPRX,
1194			       "Downstream link training failure\n");
1195			return false;
1196		}
1197
1198		if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
1199			lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n");
1200			break;
1201		}
1202
1203		if (timeout) {
1204			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1205			lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n");
1206			return false;
1207		}
1208
1209		if (time_after(jiffies, deadline))
1210			timeout = true; /* try one last time after deadline */
1211
1212		/* Update signal levels and training set as requested. */
1213		intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
1214		if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
1215			lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
1216			return false;
1217		}
1218	}
1219
1220	if (try == max_tries) {
1221		intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1222		lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n");
1223		return false;
1224	}
1225
1226	for (;;) {
1227		if (time_after(jiffies, deadline))
1228			timeout = true; /* try one last time after deadline */
1229
1230		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1231			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
1232			return false;
1233		}
1234
1235		if (drm_dp_128b132b_link_training_failed(link_status)) {
1236			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1237			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
1238			return false;
1239		}
1240
1241		if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
1242			lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n");
1243			break;
1244		}
1245
1246		if (timeout) {
1247			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1248			lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n");
1249			return false;
1250		}
1251
1252		usleep_range(2000, 3000);
1253	}
1254
1255	return true;
1256}
1257
1258/*
1259 * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
1260 */
1261static bool
1262intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
1263			   const struct intel_crtc_state *crtc_state,
1264			   int lttpr_count)
1265{
1266	u8 link_status[DP_LINK_STATUS_SIZE];
1267	unsigned long deadline;
1268
1269	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
1270			       DP_TRAINING_PATTERN_2_CDS) != 1) {
1271		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n");
1272		return false;
1273	}
1274
1275	/* Time budget for the LANEx_CDS_DONE Sequence */
1276	deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);
1277
1278	for (;;) {
1279		bool timeout = false;
1280
1281		if (time_after(jiffies, deadline))
1282			timeout = true; /* try one last time after deadline */
1283
1284		usleep_range(2000, 3000);
1285
1286		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
1287			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
1288			return false;
1289		}
1290
1291		if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
1292		    drm_dp_128b132b_cds_interlane_align_done(link_status) &&
1293		    drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
1294			lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
1295			break;
1296		}
1297
1298		if (drm_dp_128b132b_link_training_failed(link_status)) {
1299			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1300			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
1301			return false;
1302		}
1303
1304		if (timeout) {
1305			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
1306			lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n");
1307			return false;
1308		}
1309	}
1310
1311	return true;
1312}
1313
1314/*
1315 * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
1316 */
1317static bool
1318intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
1319			     const struct intel_crtc_state *crtc_state,
1320			     int lttpr_count)
1321{
1322	bool passed = false;
1323
1324	if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
1325		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
1326		return false;
1327	}
1328
1329	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
1330	    intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
1331		passed = true;
1332
1333	lt_dbg(intel_dp, DP_PHY_DPRX,
1334	       "128b/132b Link Training %s at link rate = %d, lane count = %d\n",
1335	       passed ? "passed" : "failed",
1336	       crtc_state->port_clock, crtc_state->lane_count);
1337
1338	return passed;
1339}
1340
1341/**
1342 * intel_dp_start_link_train - start link training
1343 * @intel_dp: DP struct
1344 * @crtc_state: state for CRTC attached to the encoder
1345 *
1346 * Start the link training of the @intel_dp port, scheduling a fallback
1347 * retraining with reduced link rate/lane parameters if the link training
1348 * fails.
1349 * After calling this function intel_dp_stop_link_train() must be called.
1350 */
1351void intel_dp_start_link_train(struct intel_dp *intel_dp,
1352			       const struct intel_crtc_state *crtc_state)
1353{
1354	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1355	bool passed;
1356
1357	/*
1358	 * TODO: Reiniting LTTPRs here won't be needed once proper connector
1359	 * HW state readout is added.
1360	 */
1361	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
1362
1363	if (lttpr_count < 0)
1364		/* Still continue with enabling the port and link training. */
1365		lttpr_count = 0;
1366
1367	intel_dp_prepare_link_train(intel_dp, crtc_state);
1368
1369	if (intel_dp_is_uhbr(crtc_state))
1370		passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
1371	else
1372		passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);
1373
1374	/*
1375	 * Ignore the link failure in CI
1376	 *
1377	 * In fixed enviroments like CI, sometimes unexpected long HPDs are
1378	 * generated by the displays. If ignore_long_hpd flag is set, such long
1379	 * HPDs are ignored. And probably as a consequence of these ignored
1380	 * long HPDs, subsequent link trainings are failed resulting into CI
1381	 * execution failures.
1382	 *
1383	 * For test cases which rely on the link training or processing of HPDs
1384	 * ignore_long_hpd flag can unset from the testcase.
1385	 */
1386	if (!passed && i915->display.hotplug.ignore_long_hpd) {
1387		lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
1388		return;
1389	}
1390
1391	if (!passed)
1392		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
1393}
1394
1395void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
1396				 const struct intel_crtc_state *crtc_state)
1397{
1398	/*
1399	 * VIDEO_DIP_CTL register bit 31 should be set to '0' to not
1400	 * disable SDP CRC. This is applicable for Display version 13.
1401	 * Default value of bit 31 is '0' hence discarding the write
1402	 * TODO: Corrective actions on SDP corruption yet to be defined
1403	 */
1404	if (!intel_dp_is_uhbr(crtc_state))
1405		return;
1406
1407	/* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
1408	drm_dp_dpcd_writeb(&intel_dp->aux,
1409			   DP_SDP_ERROR_DETECTION_CONFIGURATION,
1410			   DP_SDP_CRC16_128B132B_EN);
1411
1412	lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
1413}