1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/export.h>
29#include <linux/i2c.h>
30#include <linux/notifier.h>
31#include <linux/slab.h>
32#include <linux/string_helpers.h>
33#include <linux/timekeeping.h>
34#include <linux/types.h>
35
36#include <asm/byteorder.h>
37
38#include <drm/display/drm_dp_helper.h>
39#include <drm/display/drm_dsc_helper.h>
40#include <drm/display/drm_hdmi_helper.h>
41#include <drm/drm_atomic_helper.h>
42#include <drm/drm_crtc.h>
43#include <drm/drm_edid.h>
44#include <drm/drm_probe_helper.h>
45
46#include "g4x_dp.h"
47#include "i915_drv.h"
48#include "i915_irq.h"
49#include "i915_reg.h"
50#include "intel_atomic.h"
51#include "intel_audio.h"
52#include "intel_backlight.h"
53#include "intel_combo_phy_regs.h"
54#include "intel_connector.h"
55#include "intel_crtc.h"
56#include "intel_cx0_phy.h"
57#include "intel_ddi.h"
58#include "intel_de.h"
59#include "intel_display_types.h"
60#include "intel_dp.h"
61#include "intel_dp_aux.h"
62#include "intel_dp_hdcp.h"
63#include "intel_dp_link_training.h"
64#include "intel_dp_mst.h"
65#include "intel_dpio_phy.h"
66#include "intel_dpll.h"
67#include "intel_fifo_underrun.h"
68#include "intel_hdcp.h"
69#include "intel_hdmi.h"
70#include "intel_hotplug.h"
71#include "intel_hotplug_irq.h"
72#include "intel_lspcon.h"
73#include "intel_lvds.h"
74#include "intel_panel.h"
75#include "intel_pch_display.h"
76#include "intel_pps.h"
77#include "intel_psr.h"
78#include "intel_tc.h"
79#include "intel_vdsc.h"
80#include "intel_vrr.h"
81#include "intel_crtc_state_dump.h"
82
83/* DP DSC throughput values used for slice count calculations KPixels/s */
84#define DP_DSC_PEAK_PIXEL_RATE 2720000
85#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
86#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
87
88/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
89#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
90
91/* Compliance test status bits */
92#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
93#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
94#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
95#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
96
97
/* Constants for DP DSC configurations */
/* Valid DSC output bpp snap values from VESA; must stay sorted ascending */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
105
106/**
107 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
108 * @intel_dp: DP struct
109 *
110 * If a CPU or PCH DP output is attached to an eDP panel, this function
111 * will return true, and false otherwise.
112 *
113 * This function is not safe to use prior to encoder type being set.
114 */
115bool intel_dp_is_edp(struct intel_dp *intel_dp)
116{
117 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
118
119 return dig_port->base.type == INTEL_OUTPUT_EDP;
120}
121
122static void intel_dp_unset_edid(struct intel_dp *intel_dp);
123
124/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
	/* UHBR link rates imply 128b/132b channel coding */
	return drm_dp_is_uhbr_rate(crtc_state->port_clock);
}
129
130/**
131 * intel_dp_link_symbol_size - get the link symbol size for a given link rate
132 * @rate: link rate in 10kbit/s units
133 *
134 * Returns the link symbol size in bits/symbol units depending on the link
135 * rate -> channel coding.
136 */
int intel_dp_link_symbol_size(int rate)
{
	/* 128b/132b links carry 32-bit symbols, 8b/10b links 10-bit symbols */
	if (drm_dp_is_uhbr_rate(rate))
		return 32;

	return 10;
}
141
142/**
143 * intel_dp_link_symbol_clock - convert link rate to link symbol clock
144 * @rate: link rate in 10kbit/s units
145 *
146 * Returns the link symbol clock frequency in kHz units depending on the
147 * link rate and channel coding.
148 */
int intel_dp_link_symbol_clock(int rate)
{
	int symbol_size = intel_dp_link_symbol_size(rate);

	/* rate is in 10 kbit/s units, so rate * 10 is the bit rate in kbit/s */
	return DIV_ROUND_CLOSEST(rate * 10, symbol_size);
}
153
/* Fall back to RBR (1.62 Gbps) as the only sink rate */
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
	intel_dp->sink_rates[0] = 162000;
	intel_dp->num_sink_rates = 1;
}
159
/* update sink rates from dpcd */
static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;
	int max_lttpr_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	/*
	 * Sink rates for 8b/10b.
	 */
	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
	/* An LTTPR in the link may limit the rate below the sink's max */
	max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
	if (max_lttpr_rate)
		max_rate = min(max_rate, max_lttpr_rate);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	/*
	 * Sink rates for 128b/132b. If set, sink should support all 8b/10b
	 * rates and 10 Gbps.
	 */
	if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
		u8 uhbr_rates = 0;

		/* The UHBR entries below are appended after the 8b/10b rates */
		BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);

		drm_dp_dpcd_readb(&intel_dp->aux,
				  DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);

		if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
			/* We have a repeater */
			/* lttpr_common_caps[0] is the LTTPR field rev; 0x20 = rev 2.0 */
			if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
			    intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
							DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
			    DP_PHY_REPEATER_128B132B_SUPPORTED) {
				/* Repeater supports 128b/132b, valid UHBR rates */
				uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
									  DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
			} else {
				/* Does not support 128b/132b */
				uhbr_rates = 0;
			}
		}

		/* i continues from the 8b/10b loop above, keeping rates sorted */
		if (uhbr_rates & DP_UHBR10)
			intel_dp->sink_rates[i++] = 1000000;
		if (uhbr_rates & DP_UHBR13_5)
			intel_dp->sink_rates[i++] = 1350000;
		if (uhbr_rates & DP_UHBR20)
			intel_dp->sink_rates[i++] = 2000000;
	}

	intel_dp->num_sink_rates = i;
}
230
231static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
232{
233 struct intel_connector *connector = intel_dp->attached_connector;
234 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
235 struct intel_encoder *encoder = &intel_dig_port->base;
236
237 intel_dp_set_dpcd_sink_rates(intel_dp);
238
239 if (intel_dp->num_sink_rates)
240 return;
241
242 drm_err(&dp_to_i915(intel_dp)->drm,
243 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n",
244 connector->base.base.id, connector->base.name,
245 encoder->base.base.id, encoder->base.name);
246
247 intel_dp_set_default_sink_rates(intel_dp);
248}
249
/* Fall back to a single lane when the DPCD-reported lane count is invalid */
static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp)
{
	intel_dp->max_sink_lane_count = 1;
}
254
255static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
256{
257 struct intel_connector *connector = intel_dp->attached_connector;
258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
259 struct intel_encoder *encoder = &intel_dig_port->base;
260
261 intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
262
263 switch (intel_dp->max_sink_lane_count) {
264 case 1:
265 case 2:
266 case 4:
267 return;
268 }
269
270 drm_err(&dp_to_i915(intel_dp)->drm,
271 "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n",
272 connector->base.base.id, connector->base.name,
273 encoder->base.base.id, encoder->base.name,
274 intel_dp->max_sink_lane_count);
275
276 intel_dp_set_default_max_sink_lane_count(intel_dp);
277}
278
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/*
	 * rates[] is sorted in increasing order: scan from the top and
	 * return the count of entries not exceeding max_rate.
	 */
	for (i = len; i > 0; i--) {
		if (rates[i - 1] <= max_rate)
			return i;
	}

	return 0;
}
292
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
300
/*
 * Return common_rates[index], WARNing and falling back to RBR (162000)
 * on an out-of-range index.
 */
static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
{
	if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm,
			index < 0 || index >= intel_dp->num_common_rates))
		return 162000;

	return intel_dp->common_rates[index];
}
309
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is sorted ascending, so the last entry is the max */
	return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
315
316static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
317{
318 int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata);
319 int max_lanes = dig_port->max_lanes;
320
321 if (vbt_max_lanes)
322 max_lanes = min(max_lanes, vbt_max_lanes);
323
324 return max_lanes;
325}
326
327/* Theoretical max between source and sink */
328static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
329{
330 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
331 int source_max = intel_dp_max_source_lane_count(dig_port);
332 int sink_max = intel_dp->max_sink_lane_count;
333 int lane_max = intel_tc_port_max_lane_count(dig_port);
334 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
335
336 if (lttpr_max)
337 sink_max = min(sink_max, lttpr_max);
338
339 return min3(source_max, sink_max, lane_max);
340}
341
342int intel_dp_max_lane_count(struct intel_dp *intel_dp)
343{
344 switch (intel_dp->max_link_lane_count) {
345 case 1:
346 case 2:
347 case 4:
348 return intel_dp->max_link_lane_count;
349 default:
350 MISSING_CASE(intel_dp->max_link_lane_count);
351 return 1;
352 }
353}
354
355/*
356 * The required data bandwidth for a mode with given pixel clock and bpp. This
357 * is the required net bandwidth independent of the data bandwidth efficiency.
358 *
359 * TODO: check if callers of this functions should use
360 * intel_dp_effective_data_rate() instead.
361 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz; divide by 8 (rounding up) for bits -> bytes */
	return (pixel_clock * bpp + 7) / 8;
}
368
369/**
370 * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
371 * @pixel_clock: pixel clock in kHz
372 * @bpp_x16: bits per pixel .4 fixed point format
373 * @bw_overhead: BW allocation overhead in 1ppm units
374 *
375 * Return the effective pixel data rate in kB/sec units taking into account
376 * the provided SSC, FEC, DSC BW allocation overhead.
377 */
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
				 int bw_overhead)
{
	/*
	 * pixel_clock [kHz] * bpp_x16 [.4 fixed point bits/pixel] *
	 * bw_overhead [1ppm]: divide by 1e6 (ppm), 16 (.4 fixed point)
	 * and 8 (bits -> bytes) to get kB/sec, rounding up.
	 */
	return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
				1000000 * 16 * 8);
}
384
385/*
386 * Given a link rate and lanes, get the data bandwidth.
387 *
388 * Data bandwidth is the actual payload rate, which depends on the data
389 * bandwidth efficiency and the link rate.
390 *
391 * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
392 * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
393 * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
394 * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
395 * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
396 * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
397 *
398 * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
399 * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
400 * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
401 * does not match the symbol clock, the port clock (not even if you think in
402 * terms of a byte clock), nor the data bandwidth. It only matches the link bit
403 * rate in units of 10000 bps.
404 */
int
intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
	int ch_coding_efficiency =
		drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
	int max_link_rate_kbps = max_link_rate * 10;

	/*
	 * UHBR rates always use 128b/132b channel encoding, and have
	 * 96.71% data bandwidth efficiency. Consider max_link_rate the
	 * link bit rate in units of 10000 bps.
	 */
	/*
	 * Lower than UHBR rates always use 8b/10b channel encoding, and have
	 * 80% data bandwidth efficiency for SST non-FEC. However, this turns
	 * out to be a nop by coincidence:
	 *
	 * int max_link_rate_kbps = max_link_rate * 10;
	 * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
	 * max_link_rate = max_link_rate_kbps / 8;
	 */
	return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
					      ch_coding_efficiency),
				  1000000 * 8);
}
430
431bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
432{
433 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
434 struct intel_encoder *encoder = &intel_dig_port->base;
435 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
436
437 return DISPLAY_VER(dev_priv) >= 12 ||
438 (DISPLAY_VER(dev_priv) == 11 &&
439 encoder->port != PORT_A);
440}
441
static int dg2_max_source_rate(struct intel_dp *intel_dp)
{
	/* DG2: HBR3 (8.1 Gbps) for eDP, UHBR 13.5 otherwise */
	if (intel_dp_is_edp(intel_dp))
		return 810000;

	return 1350000;
}
446
447static int icl_max_source_rate(struct intel_dp *intel_dp)
448{
449 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
450 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
451 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
452
453 if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
454 return 540000;
455
456 return 810000;
457}
458
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
	/* eDP is limited to HBR2; everything else can do HBR3 */
	return intel_dp_is_edp(intel_dp) ? 540000 : 810000;
}
466
467static int mtl_max_source_rate(struct intel_dp *intel_dp)
468{
469 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
470 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
471 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
472
473 if (intel_is_c10phy(i915, phy))
474 return 810000;
475
476 return 2000000;
477}
478
479static int vbt_max_link_rate(struct intel_dp *intel_dp)
480{
481 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
482 int max_rate;
483
484 max_rate = intel_bios_dp_max_link_rate(encoder->devdata);
485
486 if (intel_dp_is_edp(intel_dp)) {
487 struct intel_connector *connector = intel_dp->attached_connector;
488 int edp_max_rate = connector->panel.vbt.edp.max_link_rate;
489
490 if (max_rate && edp_max_rate)
491 max_rate = min(max_rate, edp_max_rate);
492 else if (edp_max_rate)
493 max_rate = edp_max_rate;
494 }
495
496 return max_rate;
497}
498
/*
 * Select the platform's source link rate table and trim it by the
 * platform/PHY max rate and the (optional) VBT limit. Called once
 * at encoder init.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int mtl_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
		810000, 1000000, 1350000, 2000000,
	};
	static const int icl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
		1000000, 1350000,
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (DISPLAY_VER(dev_priv) >= 14) {
		source_rates = mtl_rates;
		size = ARRAY_SIZE(mtl_rates);
		max_rate = mtl_max_source_rate(intel_dp);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		source_rates = icl_rates;
		size = ARRAY_SIZE(icl_rates);
		if (IS_DG2(dev_priv))
			max_rate = dg2_max_source_rate(intel_dp);
		else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
			 IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
			max_rate = 810000;
		else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
			max_rate = ehl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (DISPLAY_VER(dev_priv) == 9) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HASWELL_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the platform limit with the VBT limit; 0 means "no limit" */
	vbt_max_rate = vbt_max_link_rate(intel_dp);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
575
576static int intersect_rates(const int *source_rates, int source_len,
577 const int *sink_rates, int sink_len,
578 int *common_rates)
579{
580 int i = 0, j = 0, k = 0;
581
582 while (i < source_len && j < sink_len) {
583 if (source_rates[i] == sink_rates[j]) {
584 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
585 return k;
586 common_rates[k] = source_rates[i];
587 ++k;
588 ++i;
589 ++j;
590 } else if (source_rates[i] < sink_rates[j]) {
591 ++i;
592 } else {
593 ++j;
594 }
595 }
596 return k;
597}
598
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i = 0;

	while (i < len) {
		if (rates[i] == rate)
			return i;
		i++;
	}

	return -1;
}
610
/* Intersect the source and sink rate tables into common_rates[] */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* Both rate tables must have been populated before intersecting */
	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
630
631static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
632 u8 lane_count)
633{
634 /*
635 * FIXME: we need to synchronize the current link parameters with
636 * hardware readout. Currently fast link training doesn't work on
637 * boot-up.
638 */
639 if (link_rate == 0 ||
640 link_rate > intel_dp->max_link_rate)
641 return false;
642
643 if (lane_count == 0 ||
644 lane_count > intel_dp_max_lane_count(intel_dp))
645 return false;
646
647 return true;
648}
649
650static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
651 int link_rate,
652 u8 lane_count)
653{
654 /* FIXME figure out what we actually want here */
655 const struct drm_display_mode *fixed_mode =
656 intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
657 int mode_rate, max_rate;
658
659 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
660 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
661 if (mode_rate > max_rate)
662 return false;
663
664 return true;
665}
666
/*
 * Compute fallback link parameters after a failed link training attempt.
 *
 * Tries, in order: switching eDP from optimal to max params, stepping the
 * link rate down to the next lower common rate, then halving the lane
 * count at the max common rate. Returns 0 if new parameters were selected
 * (caller should retry training), -1 if there is nothing left to try.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		drm_dbg_kms(&i915->drm,
			    "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Step down to the next lower rate at the same lane count */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_common_rate(intel_dp, index - 1),
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1);
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: halve lanes at the max rate */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
721
/* Scale @mode_clock by the DSC/FEC overhead factor (in ppm), rounding down */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
		       1000000U);
}
727
728int intel_dp_bw_fec_overhead(bool fec_enabled)
729{
730 /*
731 * TODO: Calculate the actual overhead for a given mode.
732 * The hard-coded 1/0.972261=2.853% overhead factor
733 * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
734 * 0.453% DSC overhead. This is enough for a 3840 width mode,
735 * which has a DSC overhead of up to ~0.2%, but may not be
736 * enough for a 1024 width mode where this is ~0.8% (on a 4
737 * lane DP link, with 2 DSC slices and 8 bpp color depth).
738 */
739 return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
740}
741
/* Per-platform small joiner RAM size, in bits */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	int ram_size_bytes;

	if (DISPLAY_VER(i915) >= 13)
		ram_size_bytes = 17280;
	else if (DISPLAY_VER(i915) >= 11)
		ram_size_bytes = 7680;
	else
		ram_size_bytes = 6144;

	return ram_size_bytes * 8;
}
752
/*
 * Snap a computed DSC output bpp down to a value the HW and spec support;
 * returns 0 if @bpp is below the smallest supported compressed bpp.
 */
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
	u32 bits_per_pixel = bpp;
	int i;

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
	if (DISPLAY_VER(i915) >= 13) {
		bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);

		/*
		 * According to BSpec, 27 is the max DSC output bpp,
		 * 8 is the min DSC output bpp.
		 * While we can still clamp higher bpp values to 27, saving bandwidth,
		 * if it is required to compress up to bpp < 8, means we can't do
		 * that and probably means we can't fit the required mode, even with
		 * DSC enabled.
		 */
		if (bits_per_pixel < 8) {
			drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min 8\n",
				    bits_per_pixel);
			return 0;
		}
		bits_per_pixel = min_t(u32, bits_per_pixel, 27);
	} else {
		/* Find the nearest match in the array of known BPPs from VESA */
		for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
			if (bits_per_pixel < valid_dsc_bpp[i + 1])
				break;
		}
		drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
			    bits_per_pixel, valid_dsc_bpp[i]);

		bits_per_pixel = valid_dsc_bpp[i];
	}

	return bits_per_pixel;
}
797
/*
 * Max compressed bpp the joiner hardware can carry for the given mode;
 * limited by small joiner RAM per line and, with bigjoiner, also by the
 * CDCLK-derived bigjoiner interface bandwidth.
 */
static
u32 get_max_compressed_bpp_with_joiner(struct drm_i915_private *i915,
				       u32 mode_clock, u32 mode_hdisplay,
				       bool bigjoiner)
{
	u32 max_bpp_small_joiner_ram;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay;

	if (bigjoiner) {
		int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
		/* With bigjoiner multiple dsc engines are used in parallel so PPC is 2 */
		int ppc = 2;
		u32 max_bpp_bigjoiner =
			i915->display.cdclk.max_cdclk_freq * ppc * bigjoiner_interface_bits /
			intel_dp_mode_to_fec_clock(mode_clock);

		/* Two joined pipes double the available small joiner RAM */
		max_bpp_small_joiner_ram *= 2;

		return min(max_bpp_small_joiner_ram, max_bpp_bigjoiner);
	}

	return max_bpp_small_joiner_ram;
}
823
/*
 * Compute the max DSC compressed bpp that fits the given link config,
 * mode and joiner config; returns 0 if no valid bpp exists.
 */
u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
					u32 link_clock, u32 lane_count,
					u32 mode_clock, u32 mode_hdisplay,
					bool bigjoiner,
					enum intel_output_format output_format,
					u32 pipe_bpp,
					u32 timeslots)
{
	u32 bits_per_pixel, joiner_max_bpp;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlots / 64)
	 * for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
	 * for MST -> TimeSlots has to be calculated, based on mode requirements
	 *
	 * Due to FEC overhead, the available bw is reduced to 97.2261%.
	 * To support the given mode:
	 * Bandwidth required should be <= Available link Bandwidth * FEC Overhead
	 * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead
	 * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) /
	 *		       (ModeClock / FEC Overhead)
	 * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) /
	 *		       (ModeClock / FEC Overhead * 8)
	 */
	bits_per_pixel = ((link_clock * lane_count) * timeslots) /
			 (intel_dp_mode_to_fec_clock(mode_clock) * 8);

	/* Bandwidth required for 420 is half, that of 444 format */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel *= 2;

	/*
	 * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum
	 * supported PPS value can be 63.9375 and with the further
	 * mention that for 420, 422 formats, bpp should be programmed double
	 * the target bpp restricting our target bpp to be 31.9375 at max.
	 */
	if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bits_per_pixel = min_t(u32, bits_per_pixel, 31);

	drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
		    "total bw %u pixel clock %u\n",
		    bits_per_pixel, timeslots,
		    (link_clock * lane_count * 8),
		    intel_dp_mode_to_fec_clock(mode_clock));

	/* Also clamp to what the joiner hardware can carry for this mode */
	joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, mode_clock,
							    mode_hdisplay, bigjoiner);
	bits_per_pixel = min(bits_per_pixel, joiner_max_bpp);

	bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);

	return bits_per_pixel;
}
880
/*
 * Pick the smallest valid DSC slice count (1/2/4, doubled with bigjoiner)
 * that satisfies the encoder throughput and the sink's max slice
 * width/count limits; returns 0 if no valid count exists.
 */
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
				int mode_clock, int mode_hdisplay,
				bool bigjoiner)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Min slices needed to stay within per-slice encoder throughput */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	/*
	 * Due to some DSC engine BW limitations, we need to enable second
	 * slice and VDSC engine, whenever we approach close enough to max CDCLK
	 */
	if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100))
		min_slice_count = max_t(u8, min_slice_count, 2);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = max_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* bigjoiner doubles the slice count (shift by bool) */
		u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;

		if (test_slice_count >
		    drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
			break;

		/* big joiner needs small joiner to be enabled */
		if (bigjoiner && test_slice_count < 4)
			continue;

		if (min_slice_count <= test_slice_count)
			return test_slice_count;
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
935
/* Can the source (this platform) produce frames in @format at all? */
static bool source_can_output(struct intel_dp *intel_dp,
			      enum intel_output_format format)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (format) {
	case INTEL_OUTPUT_FORMAT_RGB:
		return true;

	case INTEL_OUTPUT_FORMAT_YCBCR444:
		/*
		 * No YCbCr output support on gmch platforms.
		 * Also, ILK doesn't seem capable of DP YCbCr output.
		 * The displayed image is severely corrupted. SNB+ is fine.
		 */
		return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);

	case INTEL_OUTPUT_FORMAT_YCBCR420:
		/* Platform < Gen 11 cannot output YCbCr420 format */
		return DISPLAY_VER(i915) >= 11;

	default:
		MISSING_CASE(format);
		return false;
	}
}
962
963static bool
964dfp_can_convert_from_rgb(struct intel_dp *intel_dp,
965 enum intel_output_format sink_format)
966{
967 if (!drm_dp_is_branch(intel_dp->dpcd))
968 return false;
969
970 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444)
971 return intel_dp->dfp.rgb_to_ycbcr;
972
973 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
974 return intel_dp->dfp.rgb_to_ycbcr &&
975 intel_dp->dfp.ycbcr_444_to_420;
976
977 return false;
978}
979
980static bool
981dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp,
982 enum intel_output_format sink_format)
983{
984 if (!drm_dp_is_branch(intel_dp->dpcd))
985 return false;
986
987 if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
988 return intel_dp->dfp.ycbcr_444_to_420;
989
990 return false;
991}
992
993static bool
994dfp_can_convert(struct intel_dp *intel_dp,
995 enum intel_output_format output_format,
996 enum intel_output_format sink_format)
997{
998 switch (output_format) {
999 case INTEL_OUTPUT_FORMAT_RGB:
1000 return dfp_can_convert_from_rgb(intel_dp, sink_format);
1001 case INTEL_OUTPUT_FORMAT_YCBCR444:
1002 return dfp_can_convert_from_ycbcr444(intel_dp, sink_format);
1003 default:
1004 MISSING_CASE(output_format);
1005 return false;
1006 }
1007
1008 return false;
1009}
1010
/*
 * Pick the output format the source will transmit for the given
 * @sink_format, preferring RGB, then YCbCr444, then YCbCr420,
 * honoring a debugfs-forced DSC output format when possible.
 */
static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
		       enum intel_output_format sink_format)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	enum intel_output_format force_dsc_output_format =
		intel_dp->force_dsc_output_format;
	enum intel_output_format output_format;
	if (force_dsc_output_format) {
		/*
		 * A forced format is only usable when the source can emit it
		 * and, on a branch device, the DFP can convert it to what the
		 * sink expects (or no conversion is needed).
		 */
		if (source_can_output(intel_dp, force_dsc_output_format) &&
		    (!drm_dp_is_branch(intel_dp->dpcd) ||
		     sink_format != force_dsc_output_format ||
		     dfp_can_convert(intel_dp, force_dsc_output_format, sink_format)))
			return force_dsc_output_format;

		drm_dbg_kms(&i915->drm, "Cannot force DSC output format\n");
	}

	/* Prefer RGB, either natively or via DFP conversion. */
	if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
	    dfp_can_convert_from_rgb(intel_dp, sink_format))
		output_format = INTEL_OUTPUT_FORMAT_RGB;

	else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
		 dfp_can_convert_from_ycbcr444(intel_dp, sink_format))
		output_format = INTEL_OUTPUT_FORMAT_YCBCR444;

	else
		output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	/* The chosen format must always be one the source can output. */
	drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));

	return output_format;
}
1045
1046int intel_dp_min_bpp(enum intel_output_format output_format)
1047{
1048 if (output_format == INTEL_OUTPUT_FORMAT_RGB)
1049 return 6 * 3;
1050 else
1051 return 8 * 3;
1052}
1053
1054int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
1055{
1056 /*
1057 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
1058 * format of the number of bytes per pixel will be half the number
1059 * of bytes of RGB pixel.
1060 */
1061 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1062 bpp /= 2;
1063
1064 return bpp;
1065}
1066
1067static enum intel_output_format
1068intel_dp_sink_format(struct intel_connector *connector,
1069 const struct drm_display_mode *mode)
1070{
1071 const struct drm_display_info *info = &connector->base.display_info;
1072
1073 if (drm_mode_is_420_only(info, mode))
1074 return INTEL_OUTPUT_FORMAT_YCBCR420;
1075
1076 return INTEL_OUTPUT_FORMAT_RGB;
1077}
1078
1079static int
1080intel_dp_mode_min_output_bpp(struct intel_connector *connector,
1081 const struct drm_display_mode *mode)
1082{
1083 enum intel_output_format output_format, sink_format;
1084
1085 sink_format = intel_dp_sink_format(connector, mode);
1086
1087 output_format = intel_dp_output_format(connector, sink_format);
1088
1089 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
1090}
1091
1092static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
1093 int hdisplay)
1094{
1095 /*
1096 * Older platforms don't like hdisplay==4096 with DP.
1097 *
1098 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
1099 * and frame counter increment), but we don't get vblank interrupts,
1100 * and the pipe underruns immediately. The link also doesn't seem
1101 * to get trained properly.
1102 *
1103 * On CHV the vblank interrupts don't seem to disappear but
1104 * otherwise the symptoms are similar.
1105 *
1106 * TODO: confirm the behaviour on HSW+
1107 */
1108 return hdisplay == 4096 && !HAS_DDI(dev_priv);
1109}
1110
1111static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
1112{
1113 struct intel_connector *connector = intel_dp->attached_connector;
1114 const struct drm_display_info *info = &connector->base.display_info;
1115 int max_tmds_clock = intel_dp->dfp.max_tmds_clock;
1116
1117 /* Only consider the sink's max TMDS clock if we know this is a HDMI DFP */
1118 if (max_tmds_clock && info->max_tmds_clock)
1119 max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
1120
1121 return max_tmds_clock;
1122}
1123
1124static enum drm_mode_status
1125intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
1126 int clock, int bpc,
1127 enum intel_output_format sink_format,
1128 bool respect_downstream_limits)
1129{
1130 int tmds_clock, min_tmds_clock, max_tmds_clock;
1131
1132 if (!respect_downstream_limits)
1133 return MODE_OK;
1134
1135 tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);
1136
1137 min_tmds_clock = intel_dp->dfp.min_tmds_clock;
1138 max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);
1139
1140 if (min_tmds_clock && tmds_clock < min_tmds_clock)
1141 return MODE_CLOCK_LOW;
1142
1143 if (max_tmds_clock && tmds_clock > max_tmds_clock)
1144 return MODE_CLOCK_HIGH;
1145
1146 return MODE_OK;
1147}
1148
/*
 * Validate @mode against downstream facing port (branch device) limits:
 * PCON FRL bandwidth, DFP max dotclock, and DP++/HDMI/DVI TMDS clock.
 */
static enum drm_mode_status
intel_dp_mode_valid_downstream(struct intel_connector *connector,
			       const struct drm_display_mode *mode,
			       int target_clock)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	const struct drm_display_info *info = &connector->base.display_info;
	enum drm_mode_status status;
	enum intel_output_format sink_format;

	/* If PCON supports FRL MODE, check FRL bandwidth constraints */
	if (intel_dp->dfp.pcon_max_frl_bw) {
		int target_bw;
		int max_frl_bw;
		int bpp = intel_dp_mode_min_output_bpp(connector, mode);

		target_bw = bpp * target_clock;

		max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;

		/* converting bw from Gbps to Kbps*/
		max_frl_bw = max_frl_bw * 1000000;

		if (target_bw > max_frl_bw)
			return MODE_CLOCK_HIGH;

		/* FRL path handles the whole check; TMDS limits don't apply. */
		return MODE_OK;
	}

	if (intel_dp->dfp.max_dotclock &&
	    target_clock > intel_dp->dfp.max_dotclock)
		return MODE_CLOCK_HIGH;

	sink_format = intel_dp_sink_format(connector, mode);

	/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
	status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
					   8, sink_format, true);

	if (status != MODE_OK) {
		/*
		 * Retry assuming YCbCr 4:2:0 output, but only when the mode
		 * also supports 4:2:0 and the connector allows it.
		 */
		if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
		    !connector->base.ycbcr_420_allowed ||
		    !drm_mode_is_420_also(info, mode))
			return status;
		sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
		status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
						   8, sink_format, true);
		if (status != MODE_OK)
			return status;
	}

	return MODE_OK;
}
1202
1203bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
1204 int hdisplay, int clock)
1205{
1206 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1207
1208 if (!intel_dp_can_bigjoiner(intel_dp))
1209 return false;
1210
1211 return clock > i915->max_dotclk_freq || hdisplay > 5120;
1212}
1213
/*
 * .mode_valid() hook: check @mode against transcoder, panel, dotclock,
 * link bandwidth (optionally with DSC), big joiner and downstream limits.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
		    struct drm_display_mode *mode)
{
	struct intel_connector *connector = to_intel_connector(_connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct drm_display_mode *fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = dev_priv->max_dotclk_freq;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	enum drm_mode_status status;
	bool dsc = false, bigjoiner = false;

	status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
	if (status != MODE_OK)
		return status;

	/* Double-clocked modes are not supported on DP. */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	fixed_mode = intel_panel_fixed_mode(connector, mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		status = intel_panel_mode_valid(connector, mode);
		if (status != MODE_OK)
			return status;

		/* eDP always drives the panel's fixed mode timings. */
		target_clock = fixed_mode->clock;
	}

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Big joiner doubles the drivable dotclock by using two pipes. */
	if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
		bigjoiner = true;
		max_dotclk *= 2;
	}
	if (target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock,
					   intel_dp_mode_min_output_bpp(connector, mode));

	/* See if DSC can rescue a mode that exceeds the uncompressed budget. */
	if (HAS_DSC(dev_priv) &&
	    drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd)) {
		enum intel_output_format sink_format, output_format;
		int pipe_bpp;

		sink_format = intel_dp_sink_format(connector, mode);
		output_format = intel_dp_output_format(connector, sink_format);
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);

		/*
		 * Output bpp is stored in 6.4 format so right shift by 4 to get the
		 * integer value since we support only integer values of bpp.
		 */
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_compressed_bpp =
				drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(dev_priv,
								    max_link_clock,
								    max_lanes,
								    target_clock,
								    mode->hdisplay,
								    bigjoiner,
								    output_format,
								    pipe_bpp, 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(connector,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		/* DSC is usable only if both a bpp and a slice count were found. */
		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	/*
	 * Big joiner configuration needs DSC for TGL which is not true for
	 * XE_LPD where uncompressed joiner is supported.
	 */
	if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
		return MODE_CLOCK_HIGH;

	if (mode_rate > max_rate && !dsc)
		return MODE_CLOCK_HIGH;

	status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
	if (status != MODE_OK)
		return status;

	return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
1325
1326bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
1327{
1328 return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
1329}
1330
1331bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
1332{
1333 return DISPLAY_VER(i915) >= 10;
1334}
1335
/*
 * Format @array as a ", "-separated decimal list into @str (capacity @len).
 * The output is always NUL-terminated; formatting stops silently on
 * truncation or snprintf error.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * snprintf() returns a negative value on encoding error, and
		 * the would-be length (>= len) on truncation. Check the error
		 * case explicitly and cast before the comparison so the
		 * int-vs-size_t comparison is well defined in both cases.
		 */
		if (r < 0 || (size_t)r >= len)
			return;

		str += r;
		len -= r;
	}
}
1351
/* Log the source, sink and common link rate tables for debugging. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the string formatting work if the messages would be dropped. */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
1372
1373int
1374intel_dp_max_link_rate(struct intel_dp *intel_dp)
1375{
1376 int len;
1377
1378 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1379
1380 return intel_dp_common_rate(intel_dp, len - 1);
1381}
1382
1383int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1384{
1385 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1386 int i = intel_dp_rate_index(intel_dp->sink_rates,
1387 intel_dp->num_sink_rates, rate);
1388
1389 if (drm_WARN_ON(&i915->drm, i < 0))
1390 i = 0;
1391
1392 return i;
1393}
1394
1395void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1396 u8 *link_bw, u8 *rate_select)
1397{
1398 /* eDP 1.4 rate select method. */
1399 if (intel_dp->use_rate_select) {
1400 *link_bw = 0;
1401 *rate_select =
1402 intel_dp_rate_select(intel_dp, port_clock);
1403 } else {
1404 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1405 *rate_select = 0;
1406 }
1407}
1408
1409bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
1410{
1411 struct intel_connector *connector = intel_dp->attached_connector;
1412
1413 return connector->base.display_info.is_hdmi;
1414}
1415
1416static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1417 const struct intel_crtc_state *pipe_config)
1418{
1419 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1420 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1421
1422 if (DISPLAY_VER(dev_priv) >= 12)
1423 return true;
1424
1425 if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
1426 return true;
1427
1428 return false;
1429}
1430
1431bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1432 const struct intel_connector *connector,
1433 const struct intel_crtc_state *pipe_config)
1434{
1435 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1436 drm_dp_sink_supports_fec(connector->dp.fec_capability);
1437}
1438
1439static bool intel_dp_supports_dsc(const struct intel_connector *connector,
1440 const struct intel_crtc_state *crtc_state)
1441{
1442 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
1443 return false;
1444
1445 return intel_dsc_source_support(crtc_state) &&
1446 connector->dp.dsc_decompression_aux &&
1447 drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd);
1448}
1449
/*
 * Find the deepest bpc in [8, @bpc] that the DP->HDMI path can drive at
 * @crtc_state's clock. Returns the bpc or -EINVAL if even 8bpc won't fit.
 */
static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     int bpc, bool respect_downstream_limits)
{
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;

	/*
	 * Current bpc could already be below 8bpc due to
	 * FDI bandwidth constraints or other limits.
	 * HDMI minimum is 8bpc however.
	 */
	bpc = max(bpc, 8);

	/*
	 * We will never exceed downstream TMDS clock limits while
	 * attempting deep color. If the user insists on forcing an
	 * out of spec mode they will have to be satisfied with 8bpc.
	 */
	if (!respect_downstream_limits)
		bpc = 8;

	/* Step down in 2-bpc increments until the TMDS clock fits. */
	for (; bpc >= 8; bpc -= 2) {
		if (intel_hdmi_bpc_possible(crtc_state, bpc,
					    intel_dp_has_hdmi_sink(intel_dp)) &&
		    intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format,
					      respect_downstream_limits) == MODE_OK)
			return bpc;
	}

	return -EINVAL;
}
1481
/*
 * Compute the maximum pipe bpp, clamped by the DFP's max bpc, the
 * downstream HDMI TMDS limits, and (for eDP) the VBT-provided panel bpp.
 * Returns 0 when no valid bpp exists under the HDMI DFP constraints.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool respect_downstream_limits)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	/* A nonzero min TMDS clock indicates an HDMI DFP downstream. */
	if (intel_dp->dfp.min_tmds_clock) {
		int max_hdmi_bpc;

		max_hdmi_bpc = intel_dp_hdmi_compute_bpc(intel_dp, crtc_state, bpc,
							 respect_downstream_limits);
		if (max_hdmi_bpc < 0)
			return 0;

		bpc = min(bpc, max_hdmi_bpc);
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    intel_connector->panel.vbt.edp.bpp &&
		    intel_connector->panel.vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    intel_connector->panel.vbt.edp.bpp);
			bpp = intel_connector->panel.vbt.edp.bpp;
		}
	}

	return bpp;
}
1521
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin min == max so the link computation can't deviate. */
		limits->pipe.min_bpp = limits->pipe.max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_rate = limits->max_rate =
					intel_dp->compliance.test_link_rate;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1560
1561static bool has_seamless_m_n(struct intel_connector *connector)
1562{
1563 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1564
1565 /*
1566 * Seamless M/N reprogramming only implemented
1567 * for BDW+ double buffered M/N registers so far.
1568 */
1569 return HAS_DOUBLE_BUFFERED_M_N(i915) &&
1570 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
1571}
1572
1573static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
1574 const struct drm_connector_state *conn_state)
1575{
1576 struct intel_connector *connector = to_intel_connector(conn_state->connector);
1577 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1578
1579 /* FIXME a bit of a mess wrt clock vs. crtc_clock */
1580 if (has_seamless_m_n(connector))
1581 return intel_panel_highest_mode(connector, adjusted_mode)->clock;
1582 else
1583 return adjusted_mode->crtc_clock;
1584}
1585
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state,
				  const struct link_config_limits *limits)
{
	int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
	int mode_rate, link_rate, link_avail;

	/* Outer loop: highest pipe bpp first, stepping down 2 bpc at a time. */
	for (bpp = to_bpp_int(limits->link.max_bpp_x16);
	     bpp >= to_bpp_int(limits->link.min_bpp_x16);
	     bpp -= 2 * 3) {
		/* Link bpp accounts for 4:2:0 subsampling if active. */
		int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(clock, link_bpp);

		/* Middle loop: link rates within the configured limits. */
		for (i = 0; i < intel_dp->num_common_rates; i++) {
			link_rate = intel_dp_common_rate(intel_dp, i);
			if (link_rate < limits->min_rate ||
			    link_rate > limits->max_rate)
				continue;

			/* Inner loop: lane counts, doubling each step. */
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_avail = intel_dp_max_data_rate(link_rate,
								    lane_count);

				if (mode_rate <= link_avail) {
					/* First fit wins per the priority order above. */
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_rate;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}
1628
1629static
1630u8 intel_dp_dsc_max_src_input_bpc(struct drm_i915_private *i915)
1631{
1632 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
1633 if (DISPLAY_VER(i915) >= 12)
1634 return 12;
1635 if (DISPLAY_VER(i915) == 11)
1636 return 10;
1637
1638 return 0;
1639}
1640
1641int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
1642 u8 max_req_bpc)
1643{
1644 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1645 int i, num_bpc;
1646 u8 dsc_bpc[3] = {};
1647 u8 dsc_max_bpc;
1648
1649 dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
1650
1651 if (!dsc_max_bpc)
1652 return dsc_max_bpc;
1653
1654 dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
1655
1656 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
1657 dsc_bpc);
1658 for (i = 0; i < num_bpc; i++) {
1659 if (dsc_max_bpc >= dsc_bpc[i])
1660 return dsc_bpc[i] * 3;
1661 }
1662
1663 return 0;
1664}
1665
static int intel_dp_source_dsc_version_minor(struct drm_i915_private *i915)
{
	/* Display 14+ sources support DSC minor version 2, older ones 1. */
	if (DISPLAY_VER(i915) >= 14)
		return 2;

	return 1;
}
1670
1671static int intel_dp_sink_dsc_version_minor(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
1672{
1673 return (dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
1674 DP_DSC_MINOR_SHIFT;
1675}
1676
static int intel_dp_get_slice_height(int vactive)
{
	int candidate = 108;

	/*
	 * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108
	 * lines is an optimal slice height, but any size can be used as long
	 * as it evenly divides the vertical active and the maximum vertical
	 * slice count requirement is met. Scan even candidates upwards from
	 * the optimum.
	 */
	while (candidate <= vactive) {
		if (vactive % candidate == 0)
			return candidate;
		candidate += 2;
	}

	/*
	 * Highly unlikely we reach here as most of the resolutions will end up
	 * finding an appropriate candidate in the loop above; fall back to a
	 * slice height of 2 which should work with all resolutions.
	 */
	return 2;
}
1698
/*
 * Fill @crtc_state's DSC config from source capabilities and the sink's
 * DSC DPCD, then compute the rate-control parameters.
 * Returns 0 on success, negative errno on failure.
 */
static int intel_dp_dsc_compute_params(const struct intel_connector *connector,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	/*
	 * RC_MODEL_SIZE is currently a constant across all configurations.
	 *
	 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
	 * DP_DSC_RC_BUF_SIZE for this.
	 */
	vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
	vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;

	vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height);

	ret = intel_dsc_compute_params(crtc_state);
	if (ret)
		return ret;

	/* Use the lowest DSC version both source and sink can handle. */
	vdsc_cfg->dsc_version_major =
		(connector->dp.dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(intel_dp_source_dsc_version_minor(i915),
		    intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd));
	if (vdsc_cfg->convert_rgb)
		vdsc_cfg->convert_rgb =
			connector->dp.dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
			DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(connector->dp.dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* DSC 1.2 and 1.1 encode the max line buffer depth differently. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		connector->dp.dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
1753
1754static bool intel_dp_dsc_supports_format(const struct intel_connector *connector,
1755 enum intel_output_format output_format)
1756{
1757 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1758 u8 sink_dsc_format;
1759
1760 switch (output_format) {
1761 case INTEL_OUTPUT_FORMAT_RGB:
1762 sink_dsc_format = DP_DSC_RGB;
1763 break;
1764 case INTEL_OUTPUT_FORMAT_YCBCR444:
1765 sink_dsc_format = DP_DSC_YCbCr444;
1766 break;
1767 case INTEL_OUTPUT_FORMAT_YCBCR420:
1768 if (min(intel_dp_source_dsc_version_minor(i915),
1769 intel_dp_sink_dsc_version_minor(connector->dp.dsc_dpcd)) < 2)
1770 return false;
1771 sink_dsc_format = DP_DSC_YCbCr420_Native;
1772 break;
1773 default:
1774 return false;
1775 }
1776
1777 return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
1778}
1779
/*
 * Does a link of @link_clock x @lane_count with @timeslots have enough
 * bandwidth for @compressed_bppx16 (U6.4 compressed bpp) at @mode_clock?
 */
static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
					    u32 lane_count, u32 mode_clock,
					    enum intel_output_format output_format,
					    int timeslots)
{
	u32 available_bw, required_bw;

	/* *16/8 keeps both sides scaled to match the U6.4 bpp fixed point. */
	available_bw = (link_clock * lane_count * timeslots * 16) / 8;
	required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));

	return available_bw > required_bw;
}
1792
/*
 * Find a link rate / lane count combination from the common rate table
 * with enough bandwidth for @compressed_bppx16. On success fills
 * pipe_config's lane_count/port_clock and returns 0, else -EINVAL.
 */
static int dsc_compute_link_config(struct intel_dp *intel_dp,
				   struct intel_crtc_state *pipe_config,
				   struct link_config_limits *limits,
				   u16 compressed_bppx16,
				   int timeslots)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int link_rate, lane_count;
	int i;

	for (i = 0; i < intel_dp->num_common_rates; i++) {
		link_rate = intel_dp_common_rate(intel_dp, i);
		if (link_rate < limits->min_rate || link_rate > limits->max_rate)
			continue;

		for (lane_count = limits->min_lane_count;
		     lane_count <= limits->max_lane_count;
		     lane_count <<= 1) {
			if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
							     lane_count, adjusted_mode->clock,
							     pipe_config->output_format,
							     timeslots))
				continue;

			/* First sufficient rate/lane combination wins. */
			pipe_config->lane_count = lane_count;
			pipe_config->port_clock = link_rate;

			return 0;
		}
	}

	return -EINVAL;
}
1826
1827static
1828u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connector,
1829 struct intel_crtc_state *pipe_config,
1830 int bpc)
1831{
1832 u16 max_bppx16 = drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd);
1833
1834 if (max_bppx16)
1835 return max_bppx16;
1836 /*
1837 * If support not given in DPCD 67h, 68h use the Maximum Allowed bit rate
1838 * values as given in spec Table 2-157 DP v2.0
1839 */
1840 switch (pipe_config->output_format) {
1841 case INTEL_OUTPUT_FORMAT_RGB:
1842 case INTEL_OUTPUT_FORMAT_YCBCR444:
1843 return (3 * bpc) << 4;
1844 case INTEL_OUTPUT_FORMAT_YCBCR420:
1845 return (3 * (bpc / 2)) << 4;
1846 default:
1847 MISSING_CASE(pipe_config->output_format);
1848 break;
1849 }
1850
1851 return 0;
1852}
1853
1854int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
1855{
1856 /* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
1857 switch (pipe_config->output_format) {
1858 case INTEL_OUTPUT_FORMAT_RGB:
1859 case INTEL_OUTPUT_FORMAT_YCBCR444:
1860 return 8;
1861 case INTEL_OUTPUT_FORMAT_YCBCR420:
1862 return 6;
1863 default:
1864 MISSING_CASE(pipe_config->output_format);
1865 break;
1866 }
1867
1868 return 0;
1869}
1870
1871int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
1872 struct intel_crtc_state *pipe_config,
1873 int bpc)
1874{
1875 return intel_dp_dsc_max_sink_compressed_bppx16(connector,
1876 pipe_config, bpc) >> 4;
1877}
1878
static int dsc_src_min_compressed_bpp(void)
{
	/* The minimum compressed bpp the source hardware can produce. */
	return 8;
}
1884
static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Max Compressed bpp for Gen 13+ is 27bpp.
	 * For earlier platform is 23bpp. (Bspec:49259).
	 */
	return DISPLAY_VER(i915) >= 13 ? 27 : 23;
}
1898
1899/*
1900 * From a list of valid compressed bpps try different compressed bpp and find a
1901 * suitable link configuration that can support it.
1902 */
1903static int
1904icl_dsc_compute_link_config(struct intel_dp *intel_dp,
1905 struct intel_crtc_state *pipe_config,
1906 struct link_config_limits *limits,
1907 int dsc_max_bpp,
1908 int dsc_min_bpp,
1909 int pipe_bpp,
1910 int timeslots)
1911{
1912 int i, ret;
1913
1914 /* Compressed BPP should be less than the Input DSC bpp */
1915 dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
1916
1917 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
1918 if (valid_dsc_bpp[i] < dsc_min_bpp ||
1919 valid_dsc_bpp[i] > dsc_max_bpp)
1920 break;
1921
1922 ret = dsc_compute_link_config(intel_dp,
1923 pipe_config,
1924 limits,
1925 valid_dsc_bpp[i] << 4,
1926 timeslots);
1927 if (ret == 0) {
1928 pipe_config->dsc.compressed_bpp_x16 =
1929 to_bpp_x16(valid_dsc_bpp[i]);
1930 return 0;
1931 }
1932 }
1933
1934 return -EINVAL;
1935}
1936
1937/*
1938 * From XE_LPD onwards we supports compression bpps in steps of 1 up to
1939 * uncompressed bpp-1. So we start from max compressed bpp and see if any
1940 * link configuration is able to support that compressed bpp, if not we
1941 * step down and check for lower compressed bpp.
1942 */
1943static int
1944xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
1945 const struct intel_connector *connector,
1946 struct intel_crtc_state *pipe_config,
1947 struct link_config_limits *limits,
1948 int dsc_max_bpp,
1949 int dsc_min_bpp,
1950 int pipe_bpp,
1951 int timeslots)
1952{
1953 u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
1954 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1955 u16 compressed_bppx16;
1956 u8 bppx16_step;
1957 int ret;
1958
1959 if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
1960 bppx16_step = 16;
1961 else
1962 bppx16_step = 16 / bppx16_incr;
1963
1964 /* Compressed BPP should be less than the Input DSC bpp */
1965 dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
1966 dsc_min_bpp = dsc_min_bpp << 4;
1967
1968 for (compressed_bppx16 = dsc_max_bpp;
1969 compressed_bppx16 >= dsc_min_bpp;
1970 compressed_bppx16 -= bppx16_step) {
1971 if (intel_dp->force_dsc_fractional_bpp_en &&
1972 !to_bpp_frac(compressed_bppx16))
1973 continue;
1974 ret = dsc_compute_link_config(intel_dp,
1975 pipe_config,
1976 limits,
1977 compressed_bppx16,
1978 timeslots);
1979 if (ret == 0) {
1980 pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
1981 if (intel_dp->force_dsc_fractional_bpp_en &&
1982 to_bpp_frac(compressed_bppx16))
1983 drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
1984
1985 return 0;
1986 }
1987 }
1988 return -EINVAL;
1989}
1990
/*
 * Compute the DSC compressed bpp range from source, sink, joiner and link
 * limits, then delegate to the platform-specific search routine.
 */
static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
				      const struct intel_connector *connector,
				      struct intel_crtc_state *pipe_config,
				      struct link_config_limits *limits,
				      int pipe_bpp,
				      int timeslots)
{
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
	int dsc_joiner_max_bpp;

	/* Lower bound: the stricter of source, sink and link minimums. */
	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));

	/* Upper bound: sink limit if reported, else the source limit. */
	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;

	/* Clamp further by the joiner's bandwidth and the link's max bpp. */
	dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
								adjusted_mode->hdisplay,
								pipe_config->bigjoiner_pipes);
	dsc_max_bpp = min(dsc_max_bpp, dsc_joiner_max_bpp);
	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));

	if (DISPLAY_VER(i915) >= 13)
		return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
						     dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
	return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
					   dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
}
2027
2028static
2029u8 intel_dp_dsc_min_src_input_bpc(struct drm_i915_private *i915)
2030{
2031 /* Min DSC Input BPC for ICL+ is 8 */
2032 return HAS_DSC(i915) ? 8 : 0;
2033}
2034
2035static
2036bool is_dsc_pipe_bpp_sufficient(struct drm_i915_private *i915,
2037 struct drm_connector_state *conn_state,
2038 struct link_config_limits *limits,
2039 int pipe_bpp)
2040{
2041 u8 dsc_max_bpc, dsc_min_bpc, dsc_max_pipe_bpp, dsc_min_pipe_bpp;
2042
2043 dsc_max_bpc = min(intel_dp_dsc_max_src_input_bpc(i915), conn_state->max_requested_bpc);
2044 dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
2045
2046 dsc_max_pipe_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);
2047 dsc_min_pipe_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);
2048
2049 return pipe_bpp >= dsc_min_pipe_bpp &&
2050 pipe_bpp <= dsc_max_pipe_bpp;
2051}
2052
2053static
2054int intel_dp_force_dsc_pipe_bpp(struct intel_dp *intel_dp,
2055 struct drm_connector_state *conn_state,
2056 struct link_config_limits *limits)
2057{
2058 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2059 int forced_bpp;
2060
2061 if (!intel_dp->force_dsc_bpc)
2062 return 0;
2063
2064 forced_bpp = intel_dp->force_dsc_bpc * 3;
2065
2066 if (is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, forced_bpp)) {
2067 drm_dbg_kms(&i915->drm, "Input DSC BPC forced to %d\n", intel_dp->force_dsc_bpc);
2068 return forced_bpp;
2069 }
2070
2071 drm_dbg_kms(&i915->drm, "Cannot force DSC BPC:%d, due to DSC BPC limits\n",
2072 intel_dp->force_dsc_bpc);
2073
2074 return 0;
2075}
2076
/*
 * Select the pipe (uncompressed DSC input) bpp for an external DP sink.
 *
 * A debugfs-forced bpp is tried first; otherwise the sink's advertised DSC
 * input bpcs are walked, trying to find one for which a valid compressed
 * bpp / link configuration exists.
 *
 * Returns 0 on success with pipe_config->pipe_bpp (and, via
 * dsc_compute_compressed_bpp(), the compressed bpp) set, -EINVAL otherwise.
 */
static int intel_dp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					 struct intel_crtc_state *pipe_config,
					 struct drm_connector_state *conn_state,
					 struct link_config_limits *limits,
					 int timeslots)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	u8 max_req_bpc = conn_state->max_requested_bpc;
	u8 dsc_max_bpc, dsc_max_bpp;
	u8 dsc_min_bpc, dsc_min_bpp;
	u8 dsc_bpc[3] = {};
	int forced_bpp, pipe_bpp;
	int num_bpc, i, ret;

	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	/* If a bpp was forced via debugfs, only fall through on failure. */
	if (forced_bpp) {
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, forced_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = forced_bpp;
			return 0;
		}
	}

	dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(i915);
	if (!dsc_max_bpc)
		return -EINVAL;

	/* Clamp to the connector's max_bpc property and the pipe bpp limits. */
	dsc_max_bpc = min_t(u8, dsc_max_bpc, max_req_bpc);
	dsc_max_bpp = min(dsc_max_bpc * 3, limits->pipe.max_bpp);

	dsc_min_bpc = intel_dp_dsc_min_src_input_bpc(i915);
	dsc_min_bpp = max(dsc_min_bpc * 3, limits->pipe.min_bpp);

	/*
	 * Get the maximum DSC bpc that will be supported by any valid
	 * link configuration and compressed bpp.
	 *
	 * NOTE(review): the break below assumes dsc_bpc[] is ordered
	 * highest-first as returned by drm_dp_dsc_sink_supported_input_bpcs()
	 * - confirm against that helper if modifying this loop.
	 */
	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd, dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		pipe_bpp = dsc_bpc[i] * 3;
		if (pipe_bpp < dsc_min_bpp)
			break;
		if (pipe_bpp > dsc_max_bpp)
			continue;
		ret = dsc_compute_compressed_bpp(intel_dp, connector, pipe_config,
						 limits, pipe_bpp, timeslots);
		if (ret == 0) {
			pipe_config->pipe_bpp = pipe_bpp;
			return 0;
		}
	}

	return -EINVAL;
}
2135
/*
 * Select pipe bpp and compressed bpp for DSC on eDP.
 *
 * Unlike the external-DP path, eDP directly picks the max supportable
 * input bpp (or a debugfs-forced one), fixes the link at max rate/lane
 * count from @limits, and then picks the largest compressed bpp allowed
 * by the source/sink/link caps without searching per-timeslot configs.
 *
 * Returns 0 on success, -EINVAL if the computed bpp is out of DSC limits.
 */
static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
					  struct intel_crtc_state *pipe_config,
					  struct drm_connector_state *conn_state,
					  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	int pipe_bpp, forced_bpp;
	int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
	int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;

	forced_bpp = intel_dp_force_dsc_pipe_bpp(intel_dp, conn_state, limits);

	if (forced_bpp) {
		pipe_bpp = forced_bpp;
	} else {
		int max_bpc = min(limits->pipe.max_bpp / 3, (int)conn_state->max_requested_bpc);

		/* For eDP use max bpp that can be supported with DSC. */
		pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, max_bpc);
		if (!is_dsc_pipe_bpp_sufficient(i915, conn_state, limits, pipe_bpp)) {
			drm_dbg_kms(&i915->drm,
				    "Computed BPC is not in DSC BPC limits\n");
			return -EINVAL;
		}
	}
	/* eDP runs the link flat out; no rate/lane search is done here. */
	pipe_config->port_clock = limits->max_rate;
	pipe_config->lane_count = limits->max_lane_count;

	/* Compressed bpp floor: source/sink minimums and the link floor. */
	dsc_src_min_bpp = dsc_src_min_compressed_bpp();
	dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
	dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
	dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));

	/* Compressed bpp ceiling: sink cap (if reported), source cap, link cap. */
	dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
	dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								pipe_config,
								pipe_bpp / 3);
	dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
	dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));

	/* Compressed BPP should be less than the Input DSC bpp */
	dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);

	pipe_config->dsc.compressed_bpp_x16 =
		to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));

	pipe_config->pipe_bpp = pipe_bpp;

	return 0;
}
2188
2189int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
2190 struct intel_crtc_state *pipe_config,
2191 struct drm_connector_state *conn_state,
2192 struct link_config_limits *limits,
2193 int timeslots,
2194 bool compute_pipe_bpp)
2195{
2196 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2197 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2198 const struct intel_connector *connector =
2199 to_intel_connector(conn_state->connector);
2200 const struct drm_display_mode *adjusted_mode =
2201 &pipe_config->hw.adjusted_mode;
2202 int ret;
2203
2204 pipe_config->fec_enable = pipe_config->fec_enable ||
2205 (!intel_dp_is_edp(intel_dp) &&
2206 intel_dp_supports_fec(intel_dp, connector, pipe_config));
2207
2208 if (!intel_dp_supports_dsc(connector, pipe_config))
2209 return -EINVAL;
2210
2211 if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
2212 return -EINVAL;
2213
2214 /*
2215 * compute pipe bpp is set to false for DP MST DSC case
2216 * and compressed_bpp is calculated same time once
2217 * vpci timeslots are allocated, because overall bpp
2218 * calculation procedure is bit different for MST case.
2219 */
2220 if (compute_pipe_bpp) {
2221 if (intel_dp_is_edp(intel_dp))
2222 ret = intel_edp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
2223 conn_state, limits);
2224 else
2225 ret = intel_dp_dsc_compute_pipe_bpp(intel_dp, pipe_config,
2226 conn_state, limits, timeslots);
2227 if (ret) {
2228 drm_dbg_kms(&dev_priv->drm,
2229 "No Valid pipe bpp for given mode ret = %d\n", ret);
2230 return ret;
2231 }
2232 }
2233
2234 /* Calculate Slice count */
2235 if (intel_dp_is_edp(intel_dp)) {
2236 pipe_config->dsc.slice_count =
2237 drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
2238 true);
2239 if (!pipe_config->dsc.slice_count) {
2240 drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
2241 pipe_config->dsc.slice_count);
2242 return -EINVAL;
2243 }
2244 } else {
2245 u8 dsc_dp_slice_count;
2246
2247 dsc_dp_slice_count =
2248 intel_dp_dsc_get_slice_count(connector,
2249 adjusted_mode->crtc_clock,
2250 adjusted_mode->crtc_hdisplay,
2251 pipe_config->bigjoiner_pipes);
2252 if (!dsc_dp_slice_count) {
2253 drm_dbg_kms(&dev_priv->drm,
2254 "Compressed Slice Count not supported\n");
2255 return -EINVAL;
2256 }
2257
2258 pipe_config->dsc.slice_count = dsc_dp_slice_count;
2259 }
2260 /*
2261 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
2262 * is greater than the maximum Cdclock and if slice count is even
2263 * then we need to use 2 VDSC instances.
2264 */
2265 if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1)
2266 pipe_config->dsc.dsc_split = true;
2267
2268 ret = intel_dp_dsc_compute_params(connector, pipe_config);
2269 if (ret < 0) {
2270 drm_dbg_kms(&dev_priv->drm,
2271 "Cannot compute valid DSC parameters for Input Bpp = %d"
2272 "Compressed BPP = " BPP_X16_FMT "\n",
2273 pipe_config->pipe_bpp,
2274 BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
2275 return ret;
2276 }
2277
2278 pipe_config->dsc.compression_enable = true;
2279 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
2280 "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
2281 pipe_config->pipe_bpp,
2282 BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
2283 pipe_config->dsc.slice_count);
2284
2285 return 0;
2286}
2287
2288/**
2289 * intel_dp_compute_config_link_bpp_limits - compute output link bpp limits
2290 * @intel_dp: intel DP
2291 * @crtc_state: crtc state
2292 * @dsc: DSC compression mode
2293 * @limits: link configuration limits
2294 *
2295 * Calculates the output link min, max bpp values in @limits based on the
2296 * pipe bpp range, @crtc_state and @dsc mode.
2297 *
2298 * Returns %true in case of success.
2299 */
2300bool
2301intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
2302 const struct intel_crtc_state *crtc_state,
2303 bool dsc,
2304 struct link_config_limits *limits)
2305{
2306 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2307 const struct drm_display_mode *adjusted_mode =
2308 &crtc_state->hw.adjusted_mode;
2309 const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2310 const struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
2311 int max_link_bpp_x16;
2312
2313 max_link_bpp_x16 = min(crtc_state->max_link_bpp_x16,
2314 to_bpp_x16(limits->pipe.max_bpp));
2315
2316 if (!dsc) {
2317 max_link_bpp_x16 = rounddown(max_link_bpp_x16, to_bpp_x16(2 * 3));
2318
2319 if (max_link_bpp_x16 < to_bpp_x16(limits->pipe.min_bpp))
2320 return false;
2321
2322 limits->link.min_bpp_x16 = to_bpp_x16(limits->pipe.min_bpp);
2323 } else {
2324 /*
2325 * TODO: set the DSC link limits already here, atm these are
2326 * initialized only later in intel_edp_dsc_compute_pipe_bpp() /
2327 * intel_dp_dsc_compute_pipe_bpp()
2328 */
2329 limits->link.min_bpp_x16 = 0;
2330 }
2331
2332 limits->link.max_bpp_x16 = max_link_bpp_x16;
2333
2334 drm_dbg_kms(&i915->drm,
2335 "[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " BPP_X16_FMT "\n",
2336 encoder->base.base.id, encoder->base.name,
2337 crtc->base.base.id, crtc->base.name,
2338 adjusted_mode->crtc_clock,
2339 dsc ? "on" : "off",
2340 limits->max_lane_count,
2341 limits->max_rate,
2342 limits->pipe.max_bpp,
2343 BPP_X16_ARGS(limits->link.max_bpp_x16));
2344
2345 return true;
2346}
2347
/*
 * Fill in the complete set of link configuration limits (rate, lane
 * count, pipe bpp and link bpp) used by the link config search.
 *
 * Returns false if no valid link bpp range exists for this config.
 */
static bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
			       struct intel_crtc_state *crtc_state,
			       bool respect_downstream_limits,
			       bool dsc,
			       struct link_config_limits *limits)
{
	limits->min_rate = intel_dp_common_rate(intel_dp, 0);
	limits->max_rate = intel_dp_max_link_rate(intel_dp);

	/* FIXME 128b/132b SST support missing */
	limits->max_rate = min(limits->max_rate, 810000);

	limits->min_lane_count = 1;
	limits->max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
	limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
						respect_downstream_limits);

	if (intel_dp->use_max_params) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of in case the initial fast
		 * optimal params failed us. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically on older panels these
		 * values correspond to the native resolution of the panel.
		 */
		limits->min_lane_count = limits->max_lane_count;
		limits->min_rate = limits->max_rate;
	}

	intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);

	return intel_dp_compute_config_link_bpp_limits(intel_dp,
						       crtc_state,
						       dsc,
						       limits);
}
2388
/*
 * Compute the link configuration (rate, lane count, bpp) for the mode,
 * first uncompressed and then falling back to DSC when the uncompressed
 * search fails, when the (big)joiner requires compression, or when DSC
 * is forced via debugfs.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state,
			     bool respect_downstream_limits)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	bool joiner_needs_dsc = false;
	bool dsc_needed;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	/* Wide/fast modes need two pipes feeding one port. */
	if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
				    adjusted_mode->crtc_clock))
		pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);

	/*
	 * Pipe joiner needs compression up to display 12 due to bandwidth
	 * limitation. DG2 onwards pipe joiner can be enabled without
	 * compression.
	 */
	joiner_needs_dsc = DISPLAY_VER(i915) < 13 && pipe_config->bigjoiner_pipes;

	dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
		     !intel_dp_compute_config_limits(intel_dp, pipe_config,
						     respect_downstream_limits,
						     false,
						     &limits);

	if (!dsc_needed) {
		/*
		 * Optimize for slow and wide for everything, because there are some
		 * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
		 */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							conn_state, &limits);
		if (ret)
			dsc_needed = true;
	}

	if (dsc_needed) {
		drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
			    str_yes_no(ret), str_yes_no(joiner_needs_dsc),
			    str_yes_no(intel_dp->force_dsc_en));

		/* Recompute the limits with DSC semantics for the link bpp. */
		if (!intel_dp_compute_config_limits(intel_dp, pipe_config,
						    respect_downstream_limits,
						    true,
						    &limits))
			return -EINVAL;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits, 64, true);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}
2483
2484bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2485 const struct drm_connector_state *conn_state)
2486{
2487 const struct intel_digital_connector_state *intel_conn_state =
2488 to_intel_digital_connector_state(conn_state);
2489 const struct drm_display_mode *adjusted_mode =
2490 &crtc_state->hw.adjusted_mode;
2491
2492 /*
2493 * Our YCbCr output is always limited range.
2494 * crtc_state->limited_color_range only applies to RGB,
2495 * and it must never be set for YCbCr or we risk setting
2496 * some conflicting bits in TRANSCONF which will mess up
2497 * the colors on the monitor.
2498 */
2499 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2500 return false;
2501
2502 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2503 /*
2504 * See:
2505 * CEA-861-E - 5.1 Default Encoding Parameters
2506 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2507 */
2508 return crtc_state->pipe_bpp != 18 &&
2509 drm_default_rgb_quant_range(adjusted_mode) ==
2510 HDMI_QUANTIZATION_RANGE_LIMITED;
2511 } else {
2512 return intel_conn_state->broadcast_rgb ==
2513 INTEL_BROADCAST_RGB_LIMITED;
2514 }
2515}
2516
2517static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2518 enum port port)
2519{
2520 if (IS_G4X(dev_priv))
2521 return false;
2522 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A)
2523 return false;
2524
2525 return true;
2526}
2527
/*
 * Fill the VSC SDP header and pixel encoding / colorimetry fields from
 * the crtc state and the connector's selected colorspace, per the DP
 * 1.4a / DP 2.0 VSC SDP tables referenced inline below.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (crtc_state->has_panel_replay) {
		/*
		 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
		 * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
		 * Encoding/Colorimetry Format indication.
		 */
		vsc->revision = 0x7;
	} else {
		/*
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
		 * Colorimetry Format indication.
		 */
		vsc->revision = 0x5;
	}

	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector's colorspace property onto the DP colorimetry codes. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2617
2618static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
2619 struct intel_crtc_state *crtc_state,
2620 const struct drm_connector_state *conn_state)
2621{
2622 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
2623
2624 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
2625 if (crtc_state->has_psr)
2626 return;
2627
2628 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
2629 return;
2630
2631 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
2632 vsc->sdp_type = DP_SDP_VSC;
2633 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2634 &crtc_state->infoframes.vsc);
2635}
2636
/*
 * Build the VSC SDP used by the PSR / Panel Replay paths.
 *
 * When the sink supports colorimetry and a VSC SDP is needed, the full
 * colorimetry variant is used; otherwise a minimal header is prepared,
 * with revision/length chosen per the PSR1 / PSR2 / Panel Replay tables
 * referenced inline.
 */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	vsc->sdp_type = DP_SDP_VSC;

	if (crtc_state->has_psr2) {
		if (intel_dp->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else if (crtc_state->has_panel_replay) {
		if (intel_dp->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [Panel Replay with colorimetry info] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [Panel Replay without colorimetry info]
			 * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
			 * VSC SDP supporting 3D stereo + Panel Replay.
			 */
			vsc->revision = 0x6;
			vsc->length = 0x10;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
2685
2686static void
2687intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2688 struct intel_crtc_state *crtc_state,
2689 const struct drm_connector_state *conn_state)
2690{
2691 int ret;
2692 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2693 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2694
2695 if (!conn_state->hdr_output_metadata)
2696 return;
2697
2698 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2699
2700 if (ret) {
2701 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2702 return;
2703 }
2704
2705 crtc_state->infoframes.enable |=
2706 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2707}
2708
2709static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
2710 enum transcoder cpu_transcoder)
2711{
2712 if (HAS_DOUBLE_BUFFERED_M_N(i915))
2713 return true;
2714
2715 return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
2716}
2717
2718static bool can_enable_drrs(struct intel_connector *connector,
2719 const struct intel_crtc_state *pipe_config,
2720 const struct drm_display_mode *downclock_mode)
2721{
2722 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2723
2724 if (pipe_config->vrr.enable)
2725 return false;
2726
2727 /*
2728 * DRRS and PSR can't be enable together, so giving preference to PSR
2729 * as it allows more power-savings by complete shutting down display,
2730 * so to guarantee this, intel_drrs_compute_config() must be called
2731 * after intel_psr_compute_config().
2732 */
2733 if (pipe_config->has_psr)
2734 return false;
2735
2736 /* FIXME missing FDI M2/N2 etc. */
2737 if (pipe_config->has_pch_encoder)
2738 return false;
2739
2740 if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder))
2741 return false;
2742
2743 return downclock_mode &&
2744 intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
2745}
2746
/*
 * Compute the DRRS (seamless downclocked refresh) state: decide whether
 * DRRS can be enabled and, if so, fill dp_m2_n2 for the panel's
 * downclock mode.
 */
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
			     struct intel_crtc_state *pipe_config,
			     int link_bpp_x16)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *downclock_mode =
		intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
	int pixel_clock;

	if (has_seamless_m_n(connector))
		pipe_config->update_m_n = true;

	if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
		/* Clear any stale M2/N2 values when DRRS is not usable. */
		if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
			intel_zero_m_n(&pipe_config->dp_m2_n2);
		return;
	}

	/* VBT-provided MSA timing delay applies to ILK/SNB/IVB. */
	if (IS_IRONLAKE(i915) || IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915))
		pipe_config->msa_timing_delay = connector->panel.vbt.edp.drrs_msa_timing_delay;

	pipe_config->has_drrs = true;

	pixel_clock = downclock_mode->clock;
	if (pipe_config->splitter.enable)
		pixel_clock /= pipe_config->splitter.link_count;

	intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m2_n2);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count;
}
2784
2785static bool intel_dp_has_audio(struct intel_encoder *encoder,
2786 struct intel_crtc_state *crtc_state,
2787 const struct drm_connector_state *conn_state)
2788{
2789 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2790 const struct intel_digital_connector_state *intel_conn_state =
2791 to_intel_digital_connector_state(conn_state);
2792 struct intel_connector *connector =
2793 to_intel_connector(conn_state->connector);
2794
2795 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
2796 !intel_dp_port_has_audio(i915, encoder->port))
2797 return false;
2798
2799 if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2800 return connector->base.display_info.has_audio;
2801 else
2802 return intel_conn_state->force_audio == HDMI_AUDIO_ON;
2803}
2804
2805static int
2806intel_dp_compute_output_format(struct intel_encoder *encoder,
2807 struct intel_crtc_state *crtc_state,
2808 struct drm_connector_state *conn_state,
2809 bool respect_downstream_limits)
2810{
2811 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2812 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2813 struct intel_connector *connector = intel_dp->attached_connector;
2814 const struct drm_display_info *info = &connector->base.display_info;
2815 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
2816 bool ycbcr_420_only;
2817 int ret;
2818
2819 ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
2820
2821 if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
2822 drm_dbg_kms(&i915->drm,
2823 "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
2824 crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
2825 } else {
2826 crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode);
2827 }
2828
2829 crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format);
2830
2831 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
2832 respect_downstream_limits);
2833 if (ret) {
2834 if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
2835 !connector->base.ycbcr_420_allowed ||
2836 !drm_mode_is_420_also(info, adjusted_mode))
2837 return ret;
2838
2839 crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2840 crtc_state->output_format = intel_dp_output_format(connector,
2841 crtc_state->sink_format);
2842 ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
2843 respect_downstream_limits);
2844 }
2845
2846 return ret;
2847}
2848
2849void
2850intel_dp_audio_compute_config(struct intel_encoder *encoder,
2851 struct intel_crtc_state *pipe_config,
2852 struct drm_connector_state *conn_state)
2853{
2854 pipe_config->has_audio =
2855 intel_dp_has_audio(encoder, pipe_config, conn_state) &&
2856 intel_audio_compute_config(encoder, pipe_config, conn_state);
2857
2858 pipe_config->sdp_split_enable = pipe_config->has_audio &&
2859 intel_dp_is_uhbr(pipe_config);
2860}
2861
/*
 * Top-level atomic .compute_config() for DP/eDP encoders: validates the
 * mode, computes output format and link configuration, then derives all
 * dependent state (color range, MSO splitting, M/N, audio, VRR, PSR,
 * DRRS and the SDP infoframes).
 *
 * Returns 0 on success or a negative error code rejecting the state.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	const struct drm_display_mode *fixed_mode;
	struct intel_connector *connector = intel_dp->attached_connector;
	int ret = 0, link_bpp_x16;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
		pipe_config->has_pch_encoder = true;

	fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		ret = intel_panel_compute_config(connector, adjusted_mode);
		if (ret)
			return ret;
	}

	/* Mode flags/geometry we cannot drive. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (!connector->base.interlace_allowed &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/*
	 * Try to respect downstream TMDS clock limits first, if
	 * that fails assume the user might know something we don't.
	 */
	ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, true);
	if (ret)
		ret = intel_dp_compute_output_format(encoder, pipe_config, conn_state, false);
	if (ret)
		return ret;

	if ((intel_dp_is_edp(intel_dp) && fixed_mode) ||
	    pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
		ret = intel_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	pipe_config->enhanced_framing =
		drm_dp_enhanced_frame_cap(intel_dp->dpcd);

	/* Link bpp: compressed bpp when DSC is on, output bpp otherwise. */
	if (pipe_config->dsc.compression_enable)
		link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
	else
		link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
							      pipe_config->pipe_bpp));

	if (intel_dp->mso_link_count) {
		int n = intel_dp->mso_link_count;
		int overlap = intel_dp->mso_pixel_overlap;

		pipe_config->splitter.enable = true;
		pipe_config->splitter.link_count = n;
		pipe_config->splitter.pixel_overlap = overlap;

		drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n",
			    n, overlap);

		/* Each MSO segment carries 1/n of the timing plus the overlap. */
		adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap;
		adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap;
		adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap;
		adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap;
		adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap;
		adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap;
		adjusted_mode->crtc_clock /= n;
	}

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_link_compute_m_n(link_bpp_x16,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       intel_dp_bw_fec_overhead(pipe_config->fec_enable),
			       &pipe_config->dp_m_n);

	/* FIXME: abstract this better */
	if (pipe_config->splitter.enable)
		pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;

	if (!HAS_DDI(dev_priv))
		g4x_dp_set_clock(encoder, pipe_config);

	/* DRRS must be computed after PSR; see can_enable_drrs(). */
	intel_vrr_compute_config(pipe_config, conn_state);
	intel_psr_compute_config(intel_dp, pipe_config, conn_state);
	intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
2970
2971void intel_dp_set_link_params(struct intel_dp *intel_dp,
2972 int link_rate, int lane_count)
2973{
2974 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2975 intel_dp->link_trained = false;
2976 intel_dp->link_rate = link_rate;
2977 intel_dp->lane_count = lane_count;
2978}
2979
2980static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp)
2981{
2982 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
2983 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
2984}
2985
2986/* Enable backlight PWM and backlight PP control. */
2987void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
2988 const struct drm_connector_state *conn_state)
2989{
2990 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
2991 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2992
2993 if (!intel_dp_is_edp(intel_dp))
2994 return;
2995
2996 drm_dbg_kms(&i915->drm, "\n");
2997
2998 intel_backlight_enable(crtc_state, conn_state);
2999 intel_pps_backlight_on(intel_dp);
3000}
3001
3002/* Disable backlight PP control and backlight PWM. */
3003void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3004{
3005 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3006 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3007
3008 if (!intel_dp_is_edp(intel_dp))
3009 return;
3010
3011 drm_dbg_kms(&i915->drm, "\n");
3012
3013 intel_pps_backlight_off(intel_dp);
3014 intel_backlight_disable(old_conn_state);
3015}
3016
3017static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3018{
3019 /*
3020 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3021 * be capable of signalling downstream hpd with a long pulse.
3022 * Whether or not that means D3 is safe to use is not clear,
3023 * but let's assume so until proven otherwise.
3024 *
3025 * FIXME should really check all downstream ports...
3026 */
3027 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3028 drm_dp_is_branch(intel_dp->dpcd) &&
3029 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3030}
3031
3032static int
3033write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
3034{
3035 int err;
3036 u8 val;
3037
3038 err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
3039 if (err < 0)
3040 return err;
3041
3042 if (set)
3043 val |= flag;
3044 else
3045 val &= ~flag;
3046
3047 return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
3048}
3049
3050static void
3051intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
3052 bool enable)
3053{
3054 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3055
3056 if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
3057 DP_DECOMPRESSION_EN, enable) < 0)
3058 drm_dbg_kms(&i915->drm,
3059 "Failed to %s sink decompression state\n",
3060 str_enable_disable(enable));
3061}
3062
3063static void
3064intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
3065 bool enable)
3066{
3067 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3068 struct drm_dp_aux *aux = connector->port ?
3069 connector->port->passthrough_aux : NULL;
3070
3071 if (!aux)
3072 return;
3073
3074 if (write_dsc_decompression_flag(aux,
3075 DP_DSC_PASSTHROUGH_EN, enable) < 0)
3076 drm_dbg_kms(&i915->drm,
3077 "Failed to %s sink compression passthrough state\n",
3078 str_enable_disable(enable));
3079}
3080
/*
 * Count how many connectors in @state currently have DSC decompression
 * enabled through the same decompression AUX device as @connector.
 * @for_get_ref selects whether the caller is taking (true) or dropping
 * (false) a reference, which only affects the sanity WARN below.
 */
static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
				      const struct intel_connector *connector,
				      bool for_get_ref)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector *_connector_iter;
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	int ref_count = 0;
	int i;

	/*
	 * On SST the decompression AUX device won't be shared, each connector
	 * uses for this its own AUX targeting the sink device.
	 */
	if (!connector->mst_port)
		return connector->dp.dsc_decompression_enabled ? 1 : 0;

	for_each_oldnew_connector_in_state(&state->base, _connector_iter,
					   old_conn_state, new_conn_state, i) {
		const struct intel_connector *
			connector_iter = to_intel_connector(_connector_iter);

		/* Only connectors on the same MST port can share the AUX. */
		if (connector_iter->mst_port != connector->mst_port)
			continue;

		if (!connector_iter->dp.dsc_decompression_enabled)
			continue;

		/*
		 * A connector with decompression enabled is expected to have a
		 * CRTC in the state matching the get/put direction.
		 */
		drm_WARN_ON(&i915->drm,
			    (for_get_ref && !new_conn_state->crtc) ||
			    (!for_get_ref && !old_conn_state->crtc));

		if (connector_iter->dp.dsc_decompression_aux ==
		    connector->dp.dsc_decompression_aux)
			ref_count++;
	}

	return ref_count;
}
3121
3122static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
3123 struct intel_connector *connector)
3124{
3125 bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;
3126
3127 connector->dp.dsc_decompression_enabled = true;
3128
3129 return ret;
3130}
3131
3132static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
3133 struct intel_connector *connector)
3134{
3135 connector->dp.dsc_decompression_enabled = false;
3136
3137 return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
3138}
3139
3140/**
3141 * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
3142 * @state: atomic state
3143 * @connector: connector to enable the decompression for
3144 * @new_crtc_state: new state for the CRTC driving @connector
3145 *
3146 * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
3147 * register of the appropriate sink/branch device. On SST this is always the
3148 * sink device, whereas on MST based on each device's DSC capabilities it's
3149 * either the last branch device (enabling decompression in it) or both the
3150 * last branch device (enabling passthrough in it) and the sink device
3151 * (enabling decompression in it).
3152 */
3153void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
3154 struct intel_connector *connector,
3155 const struct intel_crtc_state *new_crtc_state)
3156{
3157 struct drm_i915_private *i915 = to_i915(state->base.dev);
3158
3159 if (!new_crtc_state->dsc.compression_enable)
3160 return;
3161
3162 if (drm_WARN_ON(&i915->drm,
3163 !connector->dp.dsc_decompression_aux ||
3164 connector->dp.dsc_decompression_enabled))
3165 return;
3166
3167 if (!intel_dp_dsc_aux_get_ref(state, connector))
3168 return;
3169
3170 intel_dp_sink_set_dsc_passthrough(connector, true);
3171 intel_dp_sink_set_dsc_decompression(connector, true);
3172}
3173
3174/**
3175 * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
3176 * @state: atomic state
3177 * @connector: connector to disable the decompression for
3178 * @old_crtc_state: old state for the CRTC driving @connector
3179 *
3180 * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
3181 * register of the appropriate sink/branch device, corresponding to the
3182 * sequence in intel_dp_sink_enable_decompression().
3183 */
3184void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
3185 struct intel_connector *connector,
3186 const struct intel_crtc_state *old_crtc_state)
3187{
3188 struct drm_i915_private *i915 = to_i915(state->base.dev);
3189
3190 if (!old_crtc_state->dsc.compression_enable)
3191 return;
3192
3193 if (drm_WARN_ON(&i915->drm,
3194 !connector->dp.dsc_decompression_aux ||
3195 !connector->dp.dsc_decompression_enabled))
3196 return;
3197
3198 if (!intel_dp_dsc_aux_put_ref(state, connector))
3199 return;
3200
3201 intel_dp_sink_set_dsc_decompression(connector, false);
3202 intel_dp_sink_set_dsc_passthrough(connector, false);
3203}
3204
/*
 * Write the Intel source OUI (0x00-aa-01) to the sink, recording the write
 * time so intel_dp_wait_source_oui() can honor the panel's refresh timeout.
 */
static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 oui[] = { 0x00, 0xaa, 0x01 };
	u8 buf[3] = {};

	/*
	 * During driver init, we want to be careful and avoid changing the source OUI if it's
	 * already set to what we want, so as to avoid clearing any state by accident
	 */
	if (careful) {
		/*
		 * On read failure buf stays zeroed, the compare below fails
		 * and we fall through to (re)writing the OUI.
		 */
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
			drm_err(&i915->drm, "Failed to read source OUI\n");

		if (memcmp(oui, buf, sizeof(oui)) == 0)
			return;
	}

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
		drm_err(&i915->drm, "Failed to write source OUI\n");

	/* Timestamp used by intel_dp_wait_source_oui(). */
	intel_dp->last_oui_write = jiffies;
}
3229
3230void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
3231{
3232 struct intel_connector *connector = intel_dp->attached_connector;
3233 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3234
3235 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n",
3236 connector->base.base.id, connector->base.name,
3237 connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
3238
3239 wait_remaining_ms_from_jiffies(intel_dp->last_oui_write,
3240 connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout);
3241}
3242
/* If the device supports it, try to set the power state appropriately */
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DP_SET_POWER_D0) {
		/* Keep the sink powered if its downstream HPD depends on D0. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		lspcon_resume(dp_to_dig_port(intel_dp));

		/* Write the source OUI as early as possible */
		if (intel_dp_is_edp(intel_dp))
			intel_edp_init_source_oui(intel_dp, false);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
			if (ret == 1)
				break;
			msleep(1);
		}

		/* ret == 1 means the DPCD write succeeded (one byte written). */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
			    encoder->base.base.id, encoder->base.name,
			    mode == DP_SET_POWER_D0 ? "D0" : "D3");
}
3288
3289static bool
3290intel_dp_get_dpcd(struct intel_dp *intel_dp);
3291
3292/**
3293 * intel_dp_sync_state - sync the encoder state during init/resume
3294 * @encoder: intel encoder to sync
3295 * @crtc_state: state for the CRTC connected to the encoder
3296 *
3297 * Sync any state stored in the encoder wrt. HW state during driver init
3298 * and system resume.
3299 */
3300void intel_dp_sync_state(struct intel_encoder *encoder,
3301 const struct intel_crtc_state *crtc_state)
3302{
3303 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3304
3305 if (!crtc_state)
3306 return;
3307
3308 /*
3309 * Don't clobber DPCD if it's been already read out during output
3310 * setup (eDP) or detect.
3311 */
3312 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3313 intel_dp_get_dpcd(intel_dp);
3314
3315 intel_dp_reset_max_link_params(intel_dp);
3316}
3317
3318bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
3319 struct intel_crtc_state *crtc_state)
3320{
3321 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
3322 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3323 bool fastset = true;
3324
3325 /*
3326 * If BIOS has set an unsupported or non-standard link rate for some
3327 * reason force an encoder recompute and full modeset.
3328 */
3329 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
3330 crtc_state->port_clock) < 0) {
3331 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to unsupported link rate\n",
3332 encoder->base.base.id, encoder->base.name);
3333 crtc_state->uapi.connectors_changed = true;
3334 fastset = false;
3335 }
3336
3337 /*
3338 * FIXME hack to force full modeset when DSC is being used.
3339 *
3340 * As long as we do not have full state readout and config comparison
3341 * of crtc_state->dsc, we have no way to ensure reliable fastset.
3342 * Remove once we have readout for DSC.
3343 */
3344 if (crtc_state->dsc.compression_enable) {
3345 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset due to DSC being enabled\n",
3346 encoder->base.base.id, encoder->base.name);
3347 crtc_state->uapi.mode_changed = true;
3348 fastset = false;
3349 }
3350
3351 if (CAN_PSR(intel_dp)) {
3352 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute PSR state\n",
3353 encoder->base.base.id, encoder->base.name);
3354 crtc_state->uapi.mode_changed = true;
3355 fastset = false;
3356 }
3357
3358 return fastset;
3359}
3360
3361static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
3362{
3363 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3364
3365 /* Clear the cached register set to avoid using stale values */
3366
3367 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
3368
3369 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
3370 intel_dp->pcon_dsc_dpcd,
3371 sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
3372 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
3373 DP_PCON_DSC_ENCODER);
3374
3375 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
3376 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
3377}
3378
3379static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
3380{
3381 int bw_gbps[] = {9, 18, 24, 32, 40, 48};
3382 int i;
3383
3384 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
3385 if (frl_bw_mask & (1 << i))
3386 return bw_gbps[i];
3387 }
3388 return 0;
3389}
3390
3391static int intel_dp_pcon_set_frl_mask(int max_frl)
3392{
3393 switch (max_frl) {
3394 case 48:
3395 return DP_PCON_FRL_BW_MASK_48GBPS;
3396 case 40:
3397 return DP_PCON_FRL_BW_MASK_40GBPS;
3398 case 32:
3399 return DP_PCON_FRL_BW_MASK_32GBPS;
3400 case 24:
3401 return DP_PCON_FRL_BW_MASK_24GBPS;
3402 case 18:
3403 return DP_PCON_FRL_BW_MASK_18GBPS;
3404 case 9:
3405 return DP_PCON_FRL_BW_MASK_9GBPS;
3406 }
3407
3408 return 0;
3409}
3410
3411static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
3412{
3413 struct intel_connector *intel_connector = intel_dp->attached_connector;
3414 struct drm_connector *connector = &intel_connector->base;
3415 int max_frl_rate;
3416 int max_lanes, rate_per_lane;
3417 int max_dsc_lanes, dsc_rate_per_lane;
3418
3419 max_lanes = connector->display_info.hdmi.max_lanes;
3420 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
3421 max_frl_rate = max_lanes * rate_per_lane;
3422
3423 if (connector->display_info.hdmi.dsc_cap.v_1p2) {
3424 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
3425 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
3426 if (max_dsc_lanes && dsc_rate_per_lane)
3427 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
3428 }
3429
3430 return max_frl_rate;
3431}
3432
3433static bool
3434intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp,
3435 u8 max_frl_bw_mask, u8 *frl_trained_mask)
3436{
3437 if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) &&
3438 drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL &&
3439 *frl_trained_mask >= max_frl_bw_mask)
3440 return true;
3441
3442 return false;
3443}
3444
/*
 * Train the PCON's HDMI FRL link at the highest bandwidth both the PCON and
 * the sink (per EDID) support. Returns 0 on success (also when the link is
 * already trained), -EINVAL if no FRL bandwidth is available, -ETIMEDOUT if
 * the PCON doesn't become ready/active in time, or a negative AUX error.
 */
static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
{
#define TIMEOUT_FRL_READY_MS 500
#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000

	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
	u8 max_frl_bw_mask = 0, frl_trained_mask;
	bool is_active;

	max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
	drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);

	max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
	drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);

	/* Train at the lower of the PCON's and the sink's limits. */
	max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);

	if (max_frl_bw <= 0)
		return -EINVAL;

	max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
	drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask);

	/* Skip retraining if the link is already up at the wanted bandwidth. */
	if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask))
		goto frl_trained;

	ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
	if (ret < 0)
		return ret;
	/* Wait for PCON to be FRL Ready */
	wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);

	if (!is_active)
		return -ETIMEDOUT;

	/* Configure bandwidth and training mode, then kick off training. */
	ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
					  DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask,
					  DP_PCON_FRL_LINK_TRAIN_NORMAL);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
	if (ret < 0)
		return ret;
	/*
	 * Wait for FRL to be completed
	 * Check if the HDMI Link is up and active.
	 */
	wait_for(is_active =
		 intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
		 TIMEOUT_HDMI_LINK_ACTIVE_MS);

	if (!is_active)
		return -ETIMEDOUT;

frl_trained:
	drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
	intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
	intel_dp->frl.is_trained = true;
	drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps);

	return 0;
}
3511
3512static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
3513{
3514 if (drm_dp_is_branch(intel_dp->dpcd) &&
3515 intel_dp_has_hdmi_sink(intel_dp) &&
3516 intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
3517 return true;
3518
3519 return false;
3520}
3521
3522static
3523int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp)
3524{
3525 int ret;
3526 u8 buf = 0;
3527
3528 /* Set PCON source control mode */
3529 buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE;
3530
3531 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
3532 if (ret < 0)
3533 return ret;
3534
3535 /* Set HDMI LINK ENABLE */
3536 buf |= DP_PCON_ENABLE_HDMI_LINK;
3537 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
3538 if (ret < 0)
3539 return ret;
3540
3541 return 0;
3542}
3543
3544void intel_dp_check_frl_training(struct intel_dp *intel_dp)
3545{
3546 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3547
3548 /*
3549 * Always go for FRL training if:
3550 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
3551 * -sink is HDMI2.1
3552 */
3553 if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
3554 !intel_dp_is_hdmi_2_1_sink(intel_dp) ||
3555 intel_dp->frl.is_trained)
3556 return;
3557
3558 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
3559 int ret, mode;
3560
3561 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
3562 ret = intel_dp_pcon_set_tmds_mode(intel_dp);
3563 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
3564
3565 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
3566 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
3567 } else {
3568 drm_dbg(&dev_priv->drm, "FRL training Completed\n");
3569 }
3570}
3571
3572static int
3573intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
3574{
3575 int vactive = crtc_state->hw.adjusted_mode.vdisplay;
3576
3577 return intel_hdmi_dsc_get_slice_height(vactive);
3578}
3579
3580static int
3581intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
3582 const struct intel_crtc_state *crtc_state)
3583{
3584 struct intel_connector *intel_connector = intel_dp->attached_connector;
3585 struct drm_connector *connector = &intel_connector->base;
3586 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
3587 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
3588 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
3589 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
3590
3591 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
3592 pcon_max_slice_width,
3593 hdmi_max_slices, hdmi_throughput);
3594}
3595
3596static int
3597intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
3598 const struct intel_crtc_state *crtc_state,
3599 int num_slices, int slice_width)
3600{
3601 struct intel_connector *intel_connector = intel_dp->attached_connector;
3602 struct drm_connector *connector = &intel_connector->base;
3603 int output_format = crtc_state->output_format;
3604 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
3605 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
3606 int hdmi_max_chunk_bytes =
3607 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
3608
3609 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
3610 num_slices, output_format, hdmi_all_bpp,
3611 hdmi_max_chunk_bytes);
3612}
3613
/*
 * Program the PCON's DSC encoder PPS override parameters (slice height,
 * slice width, bpp) for an HDMI 2.1 FRL sink. Bails out silently if either
 * side lacks DSC 1.2 support or any parameter computes to 0.
 */
void
intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 pps_param[6];
	int slice_height;
	int slice_width;
	int num_slices;
	int bits_per_pixel;
	int ret;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector *connector;
	bool hdmi_is_dsc_1_2;

	if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
		return;

	if (!intel_connector)
		return;
	connector = &intel_connector->base;
	hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;

	/* Both the PCON encoder and the HDMI sink must support DSC 1.2. */
	if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
	    !hdmi_is_dsc_1_2)
		return;

	slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
	if (!slice_height)
		return;

	num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
	if (!num_slices)
		return;

	slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
				   num_slices);

	bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
						   num_slices, slice_width);
	if (!bits_per_pixel)
		return;

	/* Pack the parameters little-endian; bpp is a 10-bit value. */
	pps_param[0] = slice_height & 0xFF;
	pps_param[1] = slice_height >> 8;
	pps_param[2] = slice_width & 0xFF;
	pps_param[3] = slice_width >> 8;
	pps_param[4] = bits_per_pixel & 0xFF;
	pps_param[5] = (bits_per_pixel >> 8) & 0x3;

	ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
}
3668
3669void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
3670 const struct intel_crtc_state *crtc_state)
3671{
3672 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3673 bool ycbcr444_to_420 = false;
3674 bool rgb_to_ycbcr = false;
3675 u8 tmp;
3676
3677 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
3678 return;
3679
3680 if (!drm_dp_is_branch(intel_dp->dpcd))
3681 return;
3682
3683 tmp = intel_dp_has_hdmi_sink(intel_dp) ? DP_HDMI_DVI_OUTPUT_CONFIG : 0;
3684
3685 if (drm_dp_dpcd_writeb(&intel_dp->aux,
3686 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
3687 drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
3688 str_enable_disable(intel_dp_has_hdmi_sink(intel_dp)));
3689
3690 if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
3691 switch (crtc_state->output_format) {
3692 case INTEL_OUTPUT_FORMAT_YCBCR420:
3693 break;
3694 case INTEL_OUTPUT_FORMAT_YCBCR444:
3695 ycbcr444_to_420 = true;
3696 break;
3697 case INTEL_OUTPUT_FORMAT_RGB:
3698 rgb_to_ycbcr = true;
3699 ycbcr444_to_420 = true;
3700 break;
3701 default:
3702 MISSING_CASE(crtc_state->output_format);
3703 break;
3704 }
3705 } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
3706 switch (crtc_state->output_format) {
3707 case INTEL_OUTPUT_FORMAT_YCBCR444:
3708 break;
3709 case INTEL_OUTPUT_FORMAT_RGB:
3710 rgb_to_ycbcr = true;
3711 break;
3712 default:
3713 MISSING_CASE(crtc_state->output_format);
3714 break;
3715 }
3716 }
3717
3718 tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
3719
3720 if (drm_dp_dpcd_writeb(&intel_dp->aux,
3721 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
3722 drm_dbg_kms(&i915->drm,
3723 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
3724 str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
3725
3726 tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
3727
3728 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
3729 drm_dbg_kms(&i915->drm,
3730 "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
3731 str_enable_disable(tmp));
3732}
3733
3734bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3735{
3736 u8 dprx = 0;
3737
3738 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
3739 &dprx) != 1)
3740 return false;
3741 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3742}
3743
3744static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
3745 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
3746{
3747 if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
3748 DP_DSC_RECEIVER_CAP_SIZE) < 0) {
3749 drm_err(aux->drm_dev,
3750 "Failed to read DPCD register 0x%x\n",
3751 DP_DSC_SUPPORT);
3752 return;
3753 }
3754
3755 drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n",
3756 DP_DSC_RECEIVER_CAP_SIZE,
3757 dsc_dpcd);
3758}
3759
3760void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
3761{
3762 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3763
3764 /*
3765 * Clear the cached register set to avoid using stale values
3766 * for the sinks that do not support DSC.
3767 */
3768 memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
3769
3770 /* Clear fec_capable to avoid using stale values */
3771 connector->dp.fec_capability = 0;
3772
3773 if (dpcd_rev < DP_DPCD_REV_14)
3774 return;
3775
3776 intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
3777 connector->dp.dsc_dpcd);
3778
3779 if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
3780 &connector->dp.fec_capability) < 0) {
3781 drm_err(&i915->drm, "Failed to read FEC DPCD register\n");
3782 return;
3783 }
3784
3785 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
3786 connector->dp.fec_capability);
3787}
3788
3789static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
3790{
3791 if (edp_dpcd_rev < DP_EDP_14)
3792 return;
3793
3794 intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
3795}
3796
3797static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
3798 struct drm_display_mode *mode)
3799{
3800 struct intel_dp *intel_dp = intel_attached_dp(connector);
3801 struct drm_i915_private *i915 = to_i915(connector->base.dev);
3802 int n = intel_dp->mso_link_count;
3803 int overlap = intel_dp->mso_pixel_overlap;
3804
3805 if (!mode || !n)
3806 return;
3807
3808 mode->hdisplay = (mode->hdisplay - overlap) * n;
3809 mode->hsync_start = (mode->hsync_start - overlap) * n;
3810 mode->hsync_end = (mode->hsync_end - overlap) * n;
3811 mode->htotal = (mode->htotal - overlap) * n;
3812 mode->clock *= n;
3813
3814 drm_mode_set_name(mode);
3815
3816 drm_dbg_kms(&i915->drm,
3817 "[CONNECTOR:%d:%s] using generated MSO mode: " DRM_MODE_FMT "\n",
3818 connector->base.base.id, connector->base.name,
3819 DRM_MODE_ARG(mode));
3820}
3821
3822void intel_edp_fixup_vbt_bpp(struct intel_encoder *encoder, int pipe_bpp)
3823{
3824 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3825 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3826 struct intel_connector *connector = intel_dp->attached_connector;
3827
3828 if (connector->panel.vbt.edp.bpp && pipe_bpp > connector->panel.vbt.edp.bpp) {
3829 /*
3830 * This is a big fat ugly hack.
3831 *
3832 * Some machines in UEFI boot mode provide us a VBT that has 18
3833 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3834 * unknown we fail to light up. Yet the same BIOS boots up with
3835 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3836 * max, not what it tells us to use.
3837 *
3838 * Note: This will still be broken if the eDP panel is not lit
3839 * up by the BIOS, and thus we can't get the mode at module
3840 * load.
3841 */
3842 drm_dbg_kms(&dev_priv->drm,
3843 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3844 pipe_bpp, connector->panel.vbt.edp.bpp);
3845 connector->panel.vbt.edp.bpp = pipe_bpp;
3846 }
3847}
3848
/*
 * Read and validate the panel's MSO (Multi-SST Operation) link capability,
 * caching the resulting link count and pixel overlap in intel_dp. An invalid
 * sink cap or a source without MSO support results in MSO being disabled
 * (link count 0).
 */
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_display_info *info = &connector->base.display_info;
	u8 mso;

	/* MSO is an eDP 1.4+ feature. */
	if (intel_dp->edp_dpcd[0] < DP_EDP_14)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) {
		drm_err(&i915->drm, "Failed to read MSO cap\n");
		return;
	}

	/* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */
	mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK;
	if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) {
		drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso);
		mso = 0;
	}

	if (mso) {
		drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
			    mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
			    info->mso_pixel_overlap);
		/* The source must also support MSO, otherwise ignore the sink cap. */
		if (!HAS_MSO(i915)) {
			drm_err(&i915->drm, "No source MSO support, disabling\n");
			mso = 0;
		}
	}

	intel_dp->mso_link_count = mso;
	intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}
3884
/*
 * One-time DPCD initialization for an eDP panel: read base caps, eDP display
 * control registers, sink link rates, DSC caps and program the source OUI.
 * Returns false if the base DPCD caps could not be read.
 *
 * NOTE: the ordering below matters — PSR init depends on edp_dpcd being
 * populated, and rate selection depends on the sink rate read.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd)) {
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

		/* Pre-eDP-1.4 panels: always train with max link parameters. */
		intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14;
	}

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Clear the default sink rates */
	intel_dp->num_sink_rates = 0;

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			/* Table is zero-terminated. */
			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);
	intel_dp_set_max_sink_lane_count(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (HAS_DSC(dev_priv))
		intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
					   connector);

	/*
	 * If needed, program our source OUI so we can make various Intel-specific AUX services
	 * available (such as HDR backlight controls)
	 */
	intel_edp_init_source_oui(intel_dp, true);

	return true;
}
3976
3977static bool
3978intel_dp_has_sink_count(struct intel_dp *intel_dp)
3979{
3980 if (!intel_dp->attached_connector)
3981 return false;
3982
3983 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
3984 intel_dp->dpcd,
3985 &intel_dp->desc);
3986}
3987
/*
 * Refresh runtime DPCD state on (re)detection: LTTPR/DPRX caps, sink rates
 * (non-eDP only), sink count and downstream port info. Returns false on AUX
 * failure or when a dongle reports no attached display.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_max_sink_lane_count(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}
4035
4036static bool
4037intel_dp_can_mst(struct intel_dp *intel_dp)
4038{
4039 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4040
4041 return i915->display.params.enable_dp_mst &&
4042 intel_dp_mst_source_support(intel_dp) &&
4043 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
4044}
4045
4046static void
4047intel_dp_configure_mst(struct intel_dp *intel_dp)
4048{
4049 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4050 struct intel_encoder *encoder =
4051 &dp_to_dig_port(intel_dp)->base;
4052 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
4053
4054 drm_dbg_kms(&i915->drm,
4055 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
4056 encoder->base.base.id, encoder->base.name,
4057 str_yes_no(intel_dp_mst_source_support(intel_dp)),
4058 str_yes_no(sink_can_mst),
4059 str_yes_no(i915->display.params.enable_dp_mst));
4060
4061 if (!intel_dp_mst_source_support(intel_dp))
4062 return;
4063
4064 intel_dp->is_mst = sink_can_mst &&
4065 i915->display.params.enable_dp_mst;
4066
4067 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4068 intel_dp->is_mst);
4069}
4070
4071static bool
4072intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
4073{
4074 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
4075}
4076
4077static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
4078{
4079 int retry;
4080
4081 for (retry = 0; retry < 3; retry++) {
4082 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1,
4083 &esi[1], 3) == 3)
4084 return true;
4085 }
4086
4087 return false;
4088}
4089
4090bool
4091intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4092 const struct drm_connector_state *conn_state)
4093{
4094 /*
4095 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4096 * of Color Encoding Format and Content Color Gamut], in order to
4097 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4098 */
4099 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4100 return true;
4101
4102 switch (conn_state->colorspace) {
4103 case DRM_MODE_COLORIMETRY_SYCC_601:
4104 case DRM_MODE_COLORIMETRY_OPYCC_601:
4105 case DRM_MODE_COLORIMETRY_BT2020_YCC:
4106 case DRM_MODE_COLORIMETRY_BT2020_RGB:
4107 case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4108 return true;
4109 default:
4110 break;
4111 }
4112
4113 return false;
4114}
4115
/*
 * Pack a drm_dp_vsc_sdp into the raw dp_sdp wire format.
 *
 * Returns the packed length (sizeof(struct dp_sdp)) or -ENOSPC if @size is
 * too small. Only revisions 0x5/0x7 carry the Pixel Encoding/Colorimetry
 * payload (DB16-DB18); other revisions get header (and, for 0x6, DB0/DB3)
 * only.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/* NOTE(review): DB0/DB3 values for revision 0x6 taken as-is; confirm against spec */
	if (vsc->revision == 0x6) {
		sdp->db[0] = 1;
		sdp->db[3] = 1;
	}

	/*
	 * Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry
	 * Format as per DP 1.4a spec and DP 2.0 respectively.
	 */
	if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}
4182
/*
 * Pack an HDMI DRM (HDR static metadata) infoframe into a DP SDP.
 *
 * The infoframe is first packed into a scratch buffer with the generic HDMI
 * helper and then re-wrapped with a DP SDP header per DP 1.4a Tables 2-100
 * and 2-101. Returns the number of bytes to hand to write_infoframe(), or
 * -ENOSPC if @size or the intermediate buffer is too small.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
					 const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	/* The packed size must exactly match header + payload. */
	if (len != infoframe_size) {
		drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
4259
4260static void intel_write_dp_sdp(struct intel_encoder *encoder,
4261 const struct intel_crtc_state *crtc_state,
4262 unsigned int type)
4263{
4264 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4265 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4266 struct dp_sdp sdp = {};
4267 ssize_t len;
4268
4269 if ((crtc_state->infoframes.enable &
4270 intel_hdmi_infoframe_enable(type)) == 0)
4271 return;
4272
4273 switch (type) {
4274 case DP_SDP_VSC:
4275 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
4276 sizeof(sdp));
4277 break;
4278 case HDMI_PACKET_TYPE_GAMUT_METADATA:
4279 len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
4280 &crtc_state->infoframes.drm.drm,
4281 &sdp, sizeof(sdp));
4282 break;
4283 default:
4284 MISSING_CASE(type);
4285 return;
4286 }
4287
4288 if (drm_WARN_ON(&dev_priv->drm, len < 0))
4289 return;
4290
4291 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
4292}
4293
4294void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
4295 const struct intel_crtc_state *crtc_state,
4296 const struct drm_dp_vsc_sdp *vsc)
4297{
4298 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4299 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4300 struct dp_sdp sdp = {};
4301 ssize_t len;
4302
4303 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
4304
4305 if (drm_WARN_ON(&dev_priv->drm, len < 0))
4306 return;
4307
4308 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
4309 &sdp, len);
4310}
4311
/*
 * Enable or disable the DP data-island packets (infoframes/SDPs) for the
 * transcoder in @crtc_state: first clear the relevant DIP enable bits in the
 * HSW_TVIDEO_DIP_CTL register, then (when enabling) write out the VSC and
 * HDR metadata SDPs. VSC handling is skipped when PSR owns it.
 */
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
		VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
		VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;

	/* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
	if (!enable && HAS_DSC(dev_priv))
		val &= ~VDIP_ENABLE_PPS;

	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (!crtc_state->has_psr)
		val &= ~VIDEO_DIP_ENABLE_VSC_HSW;

	intel_de_write(dev_priv, reg, val);
	/* Posting read to make sure the disable took effect before rewriting. */
	intel_de_posting_read(dev_priv, reg);

	if (!enable)
		return;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!crtc_state->has_psr)
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
4344
/*
 * Unpack a raw VSC SDP buffer into @vsc, validating the header against the
 * known PSR / PSR2 / Pixel Encoding revision+length combinations.
 * Returns 0 on success, -EINVAL on any malformed or unrecognized packet.
 */
static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	/* HB0: Secondary-Data Packet ID must be 0. */
	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 * Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		/* DB17[2:0] encodes the component bit depth. */
		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}
4414
/*
 * Unpack an HDR static metadata (DRM infoframe) SDP: validate the DP SDP
 * header and CTA header bytes, then hand the payload to the generic HDMI
 * helper. Returns 0 on success or a negative error code.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	/* Secondary-Data Packet ID must be 0 for non-audio infoframes. */
	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}
4460
4461static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
4462 struct intel_crtc_state *crtc_state,
4463 struct drm_dp_vsc_sdp *vsc)
4464{
4465 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4466 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4467 unsigned int type = DP_SDP_VSC;
4468 struct dp_sdp sdp = {};
4469 int ret;
4470
4471 /* When PSR is enabled, VSC SDP is handled by PSR routine */
4472 if (crtc_state->has_psr)
4473 return;
4474
4475 if ((crtc_state->infoframes.enable &
4476 intel_hdmi_infoframe_enable(type)) == 0)
4477 return;
4478
4479 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
4480
4481 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
4482
4483 if (ret)
4484 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
4485}
4486
4487static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
4488 struct intel_crtc_state *crtc_state,
4489 struct hdmi_drm_infoframe *drm_infoframe)
4490{
4491 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4492 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4493 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
4494 struct dp_sdp sdp = {};
4495 int ret;
4496
4497 if ((crtc_state->infoframes.enable &
4498 intel_hdmi_infoframe_enable(type)) == 0)
4499 return;
4500
4501 dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
4502 sizeof(sdp));
4503
4504 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
4505 sizeof(sdp));
4506
4507 if (ret)
4508 drm_dbg_kms(&dev_priv->drm,
4509 "Failed to unpack DP HDR Metadata Infoframe SDP\n");
4510}
4511
4512void intel_read_dp_sdp(struct intel_encoder *encoder,
4513 struct intel_crtc_state *crtc_state,
4514 unsigned int type)
4515{
4516 switch (type) {
4517 case DP_SDP_VSC:
4518 intel_read_dp_vsc_sdp(encoder, crtc_state,
4519 &crtc_state->infoframes.vsc);
4520 break;
4521 case HDMI_PACKET_TYPE_GAMUT_METADATA:
4522 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
4523 &crtc_state->infoframes.drm.drm);
4524 break;
4525 default:
4526 MISSING_CASE(type);
4527 break;
4528 }
4529}
4530
/*
 * Handle the DP compliance LINK_TRAINING autotest request: read the
 * requested lane count and link rate from the sink, validate them and stash
 * them for the retraining path. Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4568
/*
 * Handle the DP compliance TEST_PATTERN autotest request: read the pattern,
 * geometry and color format from the sink, accept only RGB/VESA-range color
 * ramp at 6 or 8 bpc, and record the parameters for the modeset path.
 * Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color ramp pattern is supported. */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB with VESA (full) range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	/* Geometry is transmitted big-endian over AUX. */
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
4630
/*
 * Handle the DP compliance EDID autotest request: if the last EDID read
 * failed or was corrupted, fall back to failsafe resolution; otherwise
 * write the last EDID block's checksum back to the sink and use the
 * preferred resolution. Returns the DP_TEST_* response bits.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		/* FIXME: Get rid of drm_edid_raw() */
		const struct edid *block = drm_edid_raw(intel_connector->detect_edid);

		/* We have to write the checksum of the last block read */
		block += block->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
4676
/*
 * Program the source-side DDI compliance registers for the PHY test pattern
 * the sink requested (cached in compliance.test_data.phytest).
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}
4743
/*
 * Service a pending PHY compliance test: read link status, adopt the
 * sink-requested vswing/pre-emphasis, program the source pattern generator
 * and mirror the training/pattern settings back into the sink's DPCD.
 */
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0) {
		drm_dbg_kms(&i915->drm, "failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
				  link_status);

	intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);

	intel_dp_phy_pattern_update(intel_dp, crtc_state);

	/* Tell the sink what drive settings each lane is using. */
	drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
			  intel_dp->train_set, crtc_state->lane_count);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    intel_dp->dpcd[DP_DPCD_REV]);
}
4772
4773static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4774{
4775 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4776 struct drm_dp_phy_test_params *data =
4777 &intel_dp->compliance.test_data.phytest;
4778
4779 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
4780 drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
4781 return DP_TEST_NAK;
4782 }
4783
4784 /* Set test active flag here so userspace doesn't interrupt things */
4785 intel_dp->compliance.test_active = true;
4786
4787 return DP_TEST_ACK;
4788}
4789
/*
 * Read the sink's DP_TEST_REQUEST, dispatch to the matching autotest
 * handler and write the ACK/NAK response back to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	/* Remember which test was accepted for the later servicing paths. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}
4836
4837static bool intel_dp_link_ok(struct intel_dp *intel_dp,
4838 u8 link_status[DP_LINK_STATUS_SIZE])
4839{
4840 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4841 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4842 bool uhbr = intel_dp->link_rate >= 1000000;
4843 bool ok;
4844
4845 if (uhbr)
4846 ok = drm_dp_128b132b_lane_channel_eq_done(link_status,
4847 intel_dp->lane_count);
4848 else
4849 ok = drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4850
4851 if (ok)
4852 return true;
4853
4854 intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
4855 drm_dbg_kms(&i915->drm,
4856 "[ENCODER:%d:%s] %s link not ok, retraining\n",
4857 encoder->base.base.id, encoder->base.name,
4858 uhbr ? "128b/132b" : "8b/10b");
4859
4860 return false;
4861}
4862
/*
 * Forward MST-related ESI bits to the MST core and service CP_IRQ (HDCP)
 * locally, accumulating the bits to ack into @ack.
 */
static void
intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
	/* Required out-param of the MST helper; the result is not used here. */
	bool handled = false;

	drm_dp_mst_hpd_irq_handle_event(&intel_dp->mst_mgr, esi, ack, &handled);

	if (esi[1] & DP_CP_IRQ) {
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
		ack[1] |= DP_CP_IRQ;
	}
}
4875
4876static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
4877{
4878 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4879 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
4880 u8 link_status[DP_LINK_STATUS_SIZE] = {};
4881 const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
4882
4883 if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
4884 esi_link_status_size) != esi_link_status_size) {
4885 drm_err(&i915->drm,
4886 "[ENCODER:%d:%s] Failed to read link status\n",
4887 encoder->base.base.id, encoder->base.name);
4888 return false;
4889 }
4890
4891 return intel_dp_link_ok(intel_dp, link_status);
4892}
4893
4894/**
4895 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
4896 * @intel_dp: Intel DP struct
4897 *
4898 * Read any pending MST interrupts, call MST core to handle these and ack the
4899 * interrupts. Check if the main and AUX link state is ok.
4900 *
4901 * Returns:
4902 * - %true if pending interrupts were serviced (or no interrupts were
4903 * pending) w/o detecting an error condition.
4904 * - %false if an error condition - like AUX failure or a loss of link - is
4905 * detected, which needs servicing from the hotplug work.
4906 */
4907static bool
4908intel_dp_check_mst_status(struct intel_dp *intel_dp)
4909{
4910 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4911 bool link_ok = true;
4912
4913 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
4914
4915 for (;;) {
4916 u8 esi[4] = {};
4917 u8 ack[4] = {};
4918
4919 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
4920 drm_dbg_kms(&i915->drm,
4921 "failed to get ESI - device may have failed\n");
4922 link_ok = false;
4923
4924 break;
4925 }
4926
4927 drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
4928
4929 if (intel_dp->active_mst_links > 0 && link_ok &&
4930 esi[3] & LINK_STATUS_CHANGED) {
4931 if (!intel_dp_mst_link_status(intel_dp))
4932 link_ok = false;
4933 ack[3] |= LINK_STATUS_CHANGED;
4934 }
4935
4936 intel_dp_mst_hpd_irq(intel_dp, esi, ack);
4937
4938 if (!memchr_inv(ack, 0, sizeof(ack)))
4939 break;
4940
4941 if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
4942 drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
4943
4944 if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
4945 drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
4946 }
4947
4948 return link_ok;
4949}
4950
/*
 * Handle a PCON HDMI link status change: if FRL had been trained but the
 * HDMI link is no longer active, disable the HDMI link on the PCON, read
 * out the FRL link error counters and kick off FRL training again (which
 * may fall back to TMDS mode).
 */
static void
intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
{
	bool is_active;
	u8 buf = 0;

	is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
	if (intel_dp->frl.is_trained && !is_active) {
		/* Read-modify-write: clear only the HDMI link enable bit. */
		if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
			return;

		buf &= ~DP_PCON_ENABLE_HDMI_LINK;
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
			return;

		drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

		intel_dp->frl.is_trained = false;

		/* Restart FRL training or fall back to TMDS mode */
		intel_dp_check_frl_training(intel_dp);
	}
}
4974
/*
 * Decide whether the main link needs retraining: the link must have been
 * trained, PSR must not currently be in control of it, the cached link
 * parameters must still be valid, and the DPRX link status must report a
 * problem.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
					     link_status) < 0)
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to user the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if link not ok */
	return !intel_dp_link_ok(intel_dp, link_status);
}
5013
5014static bool intel_dp_has_connector(struct intel_dp *intel_dp,
5015 const struct drm_connector_state *conn_state)
5016{
5017 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5018 struct intel_encoder *encoder;
5019 enum pipe pipe;
5020
5021 if (!conn_state->best_encoder)
5022 return false;
5023
5024 /* SST */
5025 encoder = &dp_to_dig_port(intel_dp)->base;
5026 if (conn_state->best_encoder == &encoder->base)
5027 return true;
5028
5029 /* MST */
5030 for_each_pipe(i915, pipe) {
5031 encoder = &intel_dp->mst_encoders[pipe]->base;
5032 if (conn_state->best_encoder == &encoder->base)
5033 return true;
5034 }
5035
5036 return false;
5037}
5038
/*
 * intel_dp_get_active_pipes - mask of pipes actively driven by this DP port
 * @intel_dp: Intel DP struct
 * @ctx: modeset lock acquire context
 * @pipe_mask: returned mask of active pipes (SST or MST streams)
 *
 * Pipes whose connector has a pending commit that has not yet reached
 * hw_done are skipped. Returns 0 on success or the error from
 * drm_modeset_lock() (-EDEADLK requires the caller's usual backoff
 * handling).
 */
int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
			      struct drm_modeset_acquire_ctx *ctx,
			      u8 *pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*pipe_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Only connectors driven by this DP port are of interest. */
		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		/* Lock the crtc before inspecting its state. */
		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip pipes with a commit still in flight. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*pipe_mask |= BIT(crtc->pipe);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
5085
5086static bool intel_dp_is_connected(struct intel_dp *intel_dp)
5087{
5088 struct intel_connector *connector = intel_dp->attached_connector;
5089
5090 return connector->base.status == connector_status_connected ||
5091 intel_dp->is_mst;
5092}
5093
/*
 * intel_dp_retrain_link - retrain the DP link if needed
 * @encoder: the encoder whose link to check/retrain
 * @ctx: modeset lock acquire context
 *
 * Checks whether the link needs retraining and, if so, retrains it on all
 * active pipes of the port: first FIFO underrun reporting is suppressed on
 * all affected pipes, then link training is run once (on the MST master
 * transcoder for TGL+ MST), then after a vblank underrun reporting is
 * re-enabled. Returns 0 or a drm_modeset_lock() error (-EDEADLK needs the
 * caller's backoff handling).
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (pipe_mask == 0)
		return 0;

	/* Re-check: the crtc locks were dropped/reacquired above. */
	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_check_frl_training(intel_dp);
		intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_crtc_wait_for_next_vblank(crtc);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
5170
5171static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
5172 struct drm_modeset_acquire_ctx *ctx,
5173 u8 *pipe_mask)
5174{
5175 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5176 struct drm_connector_list_iter conn_iter;
5177 struct intel_connector *connector;
5178 int ret = 0;
5179
5180 *pipe_mask = 0;
5181
5182 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
5183 for_each_intel_connector_iter(connector, &conn_iter) {
5184 struct drm_connector_state *conn_state =
5185 connector->base.state;
5186 struct intel_crtc_state *crtc_state;
5187 struct intel_crtc *crtc;
5188
5189 if (!intel_dp_has_connector(intel_dp, conn_state))
5190 continue;
5191
5192 crtc = to_intel_crtc(conn_state->crtc);
5193 if (!crtc)
5194 continue;
5195
5196 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
5197 if (ret)
5198 break;
5199
5200 crtc_state = to_intel_crtc_state(crtc->base.state);
5201
5202 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
5203
5204 if (!crtc_state->hw.active)
5205 continue;
5206
5207 if (conn_state->commit &&
5208 !try_wait_for_completion(&conn_state->commit->hw_done))
5209 continue;
5210
5211 *pipe_mask |= BIT(crtc->pipe);
5212 }
5213 drm_connector_list_iter_end(&conn_iter);
5214
5215 return ret;
5216}
5217
/*
 * Run a requested DP PHY compliance test pattern on the port's active
 * pipe(s). For TGL+ MST only the master transcoder is used. Returns 0 or
 * a drm_modeset_lock() error (-EDEADLK needs the caller's backoff
 * handling).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	/* No active pipes: nothing to test on. */
	if (pipe_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}
5258
5259void intel_dp_phy_test(struct intel_encoder *encoder)
5260{
5261 struct drm_modeset_acquire_ctx ctx;
5262 int ret;
5263
5264 drm_modeset_acquire_init(&ctx, 0);
5265
5266 for (;;) {
5267 ret = intel_dp_do_phy_test(encoder, &ctx);
5268
5269 if (ret == -EDEADLK) {
5270 drm_modeset_backoff(&ctx);
5271 continue;
5272 }
5273
5274 break;
5275 }
5276
5277 drm_modeset_drop_locks(&ctx);
5278 drm_modeset_acquire_fini(&ctx);
5279 drm_WARN(encoder->base.dev, ret,
5280 "Acquiring modeset locks failed with %i\n", ret);
5281}
5282
/*
 * Service the DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD 1.1+): ack the pending
 * bits by writing them back, then dispatch automated test requests, HDCP
 * CP IRQs and (unhandled) sink specific IRQs.
 */
static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	/* The IRQ vector register only exists from DPCD rev 1.1 on. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Writing the bits back clears (acks) them. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
5306
/*
 * Service the DP_LINK_SERVICE_IRQ_VECTOR_ESI0 (DPCD 1.1+): ack the pending
 * bits by writing them back, then handle an HDMI (PCON FRL) link status
 * change if signalled.
 */
static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
		return;

	/* Only proceed once the ack (write-back) is known to have landed. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux,
			       DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
		return;

	if (val & HDMI_LINK_STATUS_CHANGED)
		intel_dp_handle_hdmi_link_status_change(intel_dp);
}
5325
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Service device and link IRQ vectors before anything else. */
	intel_dp_check_device_service_irq(intel_dp);
	intel_dp_check_link_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}
5398
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine the connector status from DPCD: read the DPCD, then for branch
 * devices try (in order) SINK_COUNT with HPD-aware downstream ports, MST
 * capability, a gentle DDC probe, and finally per-port-type heuristics.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP never takes this path; see intel_dp_detect(). */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		       connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Pre-1.1 DPCD only has the coarse downstream port type. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
5452
5453static enum drm_connector_status
5454edp_detect(struct intel_dp *intel_dp)
5455{
5456 return connector_status_connected;
5457}
5458
/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* The platform-specific ->connected() hook needs display power. */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}
5482
5483static const struct drm_edid *
5484intel_dp_get_edid(struct intel_dp *intel_dp)
5485{
5486 struct intel_connector *connector = intel_dp->attached_connector;
5487 const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
5488
5489 /* Use panel fixed edid if we have one */
5490 if (fixed_edid) {
5491 /* invalid edid */
5492 if (IS_ERR(fixed_edid))
5493 return NULL;
5494
5495 return drm_edid_dup(fixed_edid);
5496 }
5497
5498 return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
5499}
5500
/*
 * Refresh the cached downstream facing port (DFP) capabilities - max bpc,
 * dotclock, TMDS clock range and PCON FRL bandwidth - from the DPCD and
 * the (possibly NULL) EDID, then read the PCON DSC caps.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct drm_edid *drm_edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, drm_edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 drm_edid);

	intel_dp->dfp.pcon_max_frl_bw =
		drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
					   intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock,
		    intel_dp->dfp.pcon_max_frl_bw);

	intel_dp_get_pcon_dsc_cap(intel_dp);
}
5540
5541static bool
5542intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
5543{
5544 if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) &&
5545 (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough))
5546 return true;
5547
5548 if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) &&
5549 dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
5550 return true;
5551
5552 if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) &&
5553 dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
5554 return true;
5555
5556 return false;
5557}
5558
/*
 * Refresh the cached DFP YCbCr 4:2:0 related capabilities (passthrough,
 * 4:4:4->4:2:0 and RGB->YCbCr conversion) and update the connector's
 * ycbcr_420_allowed flag accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.ycbcr420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
	intel_dp->dfp.ycbcr_444_to_420 =
		dp_to_dig_port(intel_dp)->lspcon.active ||
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);
	intel_dp->dfp.rgb_to_ycbcr =
		drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
							  intel_dp->downstream_ports,
							  DP_DS_HDMI_BT709_RGB_YCBCR_CONV);

	connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    str_yes_no(intel_dp->dfp.rgb_to_ycbcr),
		    str_yes_no(connector->base.ycbcr_420_allowed),
		    str_yes_no(intel_dp->dfp.ycbcr_444_to_420));
}
5587
/*
 * Read and cache a fresh EDID for the connector, then update everything
 * derived from it: connector display info, the VRR capable property, DFP
 * capabilities, 4:2:0 support and the CEC physical address.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	const struct drm_edid *drm_edid;
	bool vrr_capable;

	/* Drop any previously cached EDID before reading a new one. */
	intel_dp_unset_edid(intel_dp);
	drm_edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = drm_edid;

	/* Below we depend on display info having been updated */
	drm_edid_connector_update(&connector->base, drm_edid);

	vrr_capable = intel_vrr_is_capable(connector);
	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
		    connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
	drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);

	intel_dp_update_dfp(intel_dp, drm_edid);
	intel_dp_update_420(intel_dp);

	drm_dp_cec_attach(&intel_dp->aux,
			  connector->base.display_info.source_physical_address);
}
5614
/*
 * Drop the cached EDID and reset everything derived from it: the DFP
 * capability caches, the 4:2:0 flags and the VRR capable property.
 * Counterpart of intel_dp_set_edid().
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	drm_edid_free(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.pcon_max_frl_bw = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;

	drm_connector_set_vrr_capable_property(&connector->base,
					       false);
}
5637
5638static void
5639intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *connector)
5640{
5641 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5642
5643 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5644 if (!HAS_DSC(i915))
5645 return;
5646
5647 if (intel_dp_is_edp(intel_dp))
5648 intel_edp_get_dsc_sink_cap(intel_dp->edp_dpcd[0],
5649 connector);
5650 else
5651 intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
5652 connector);
5653}
5654
/*
 * Connector ->detect() hook for DP: determine the connector status,
 * resetting per-sink state on disconnect, (re)configuring MST, forcing a
 * link status check for external monitors and caching the EDID. Returns
 * a connector status or a negative error (e.g. -EDEADLK from the retrain
 * path, which needs the caller's modeset-lock backoff handling).
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector =
		to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!intel_display_device_enabled(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Reset per-sink state so a new sink starts clean. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
		intel_dp->psr.sink_panel_replay_support = false;

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (!intel_dp_is_edp(intel_dp))
		intel_psr_init_dpcd(intel_dp);

	intel_dp_detect_dsc_caps(intel_dp, intel_connector);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		intel_dp_reset_max_link_params(intel_dp);
		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_device_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
5768
5769static void
5770intel_dp_force(struct drm_connector *connector)
5771{
5772 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5773 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5774 struct intel_encoder *intel_encoder = &dig_port->base;
5775 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5776
5777 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
5778 connector->base.id, connector->name);
5779 intel_dp_unset_edid(intel_dp);
5780
5781 if (connector->status != connector_status_connected)
5782 return;
5783
5784 intel_dp_set_edid(intel_dp);
5785}
5786
/*
 * Connector ->get_modes() hook: add modes from the cached EDID (plus the
 * fixed mode for eDP); when no EDID modes exist, fall back to a mode
 * derived from the downstream port capabilities.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int num_modes;

	/* drm_edid_connector_update() done in ->detect() or ->force() */
	num_modes = drm_edid_connector_add_modes(connector);

	/* Also add fixed mode, which may or may not be present in EDID */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
		num_modes += intel_panel_get_modes(intel_connector);

	if (num_modes)
		return num_modes;

	/* No EDID modes: fall back to a downstream-port derived mode. */
	if (!intel_connector->detect_edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num_modes++;
		}
	}

	return num_modes;
}
5817
/*
 * Connector ->late_register() hook: register the connector, its AUX
 * channel and CEC adapter, and for LSPCON ports initialize LSPCON and
 * expose the HDR output metadata property when supported.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_lspcon *lspcon = &dig_port->lspcon;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	/* The AUX device is parented to the connector's kernel device. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);

	if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata))
		return ret;

	/*
	 * ToDo: Clean this up to handle lspcon init and resume more
	 * efficiently and streamlined.
	 */
	if (lspcon_init(dig_port)) {
		lspcon_detect_hdr_capability(lspcon);
		if (lspcon->hdr_supported)
			drm_connector_attach_hdr_output_metadata_property(connector);
	}

	return ret;
}
5854
/*
 * Connector ->early_unregister() hook; tears down in the reverse order of
 * intel_dp_connector_register(): CEC first, then the AUX channel, then
 * the connector itself.
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
5864
5865void intel_dp_connector_sync_state(struct intel_connector *connector,
5866 const struct intel_crtc_state *crtc_state)
5867{
5868 struct drm_i915_private *i915 = to_i915(connector->base.dev);
5869
5870 if (crtc_state && crtc_state->dsc.compression_enable) {
5871 drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
5872 connector->dp.dsc_decompression_enabled = true;
5873 } else {
5874 connector->dp.dsc_decompression_enabled = false;
5875 }
5876}
5877
/*
 * Encoder teardown: clean up the MST encoders, synchronously turn off
 * panel VDD, wait out the panel power cycle and release the AUX channel
 * resources.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);

	intel_pps_vdd_off_sync(intel_dp);

	/*
	 * Ensure power off delay is respected on module remove, so that we can
	 * reduce delays at driver probe. See pps_init_timestamps().
	 */
	intel_pps_wait_power_cycle(intel_dp);

	intel_dp_aux_fini(intel_dp);
}
5895
/*
 * Encoder suspend hook: synchronously turn off panel VDD before
 * suspending.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	intel_pps_vdd_off_sync(enc_to_intel_dp(intel_encoder));
}
5902
/*
 * Encoder shutdown hook: wait for the panel power cycle to complete
 * before shutdown proceeds.
 */
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
{
	intel_pps_wait_power_cycle(enc_to_intel_dp(intel_encoder));
}
5909
/*
 * Add every connector belonging to @tile_group_id (and its crtc's planes)
 * to the atomic state, marking the crtcs for a full modeset. Returns 0 or
 * a negative error from state acquisition.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		/* Pulling the connector state also locks its crtc. */
		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
5951
5952static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
5953{
5954 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5955 struct intel_crtc *crtc;
5956
5957 if (transcoders == 0)
5958 return 0;
5959
5960 for_each_intel_crtc(&dev_priv->drm, crtc) {
5961 struct intel_crtc_state *crtc_state;
5962 int ret;
5963
5964 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5965 if (IS_ERR(crtc_state))
5966 return PTR_ERR(crtc_state);
5967
5968 if (!crtc_state->hw.enable)
5969 continue;
5970
5971 if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
5972 continue;
5973
5974 crtc_state->uapi.mode_changed = true;
5975
5976 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
5977 if (ret)
5978 return ret;
5979
5980 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
5981 if (ret)
5982 return ret;
5983
5984 transcoders &= ~BIT(crtc_state->cpu_transcoder);
5985 }
5986
5987 drm_WARN_ON(&dev_priv->drm, transcoders != 0);
5988
5989 return 0;
5990}
5991
5992static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
5993 struct drm_connector *connector)
5994{
5995 const struct drm_connector_state *old_conn_state =
5996 drm_atomic_get_old_connector_state(&state->base, connector);
5997 const struct intel_crtc_state *old_crtc_state;
5998 struct intel_crtc *crtc;
5999 u8 transcoders;
6000
6001 crtc = to_intel_crtc(old_conn_state->crtc);
6002 if (!crtc)
6003 return 0;
6004
6005 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6006
6007 if (!old_crtc_state->hw.active)
6008 return 0;
6009
6010 transcoders = old_crtc_state->sync_mode_slaves_mask;
6011 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
6012 transcoders |= BIT(old_crtc_state->master_transcoder);
6013
6014 return intel_modeset_affected_transcoders(state,
6015 transcoders);
6016}
6017
6018static int intel_dp_connector_atomic_check(struct drm_connector *conn,
6019 struct drm_atomic_state *_state)
6020{
6021 struct drm_i915_private *dev_priv = to_i915(conn->dev);
6022 struct intel_atomic_state *state = to_intel_atomic_state(_state);
6023 struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
6024 struct intel_connector *intel_conn = to_intel_connector(conn);
6025 struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
6026 int ret;
6027
6028 ret = intel_digital_connector_atomic_check(conn, &state->base);
6029 if (ret)
6030 return ret;
6031
6032 if (intel_dp_mst_source_support(intel_dp)) {
6033 ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
6034 if (ret)
6035 return ret;
6036 }
6037
6038 /*
6039 * We don't enable port sync on BDW due to missing w/as and
6040 * due to not having adjusted the modeset sequence appropriately.
6041 */
6042 if (DISPLAY_VER(dev_priv) < 9)
6043 return 0;
6044
6045 if (!intel_connector_needs_modeset(state, conn))
6046 return 0;
6047
6048 if (conn->has_tile) {
6049 ret = intel_modeset_tile_group(state, conn->tile_group->id);
6050 if (ret)
6051 return ret;
6052 }
6053
6054 return intel_modeset_synced_crtcs(state, conn);
6055}
6056
/*
 * Handle an out-of-band hotplug notification (e.g. from a Type-C
 * subsystem) by recording the new HPD state and kicking the hotplug work.
 */
static void intel_dp_oob_hotplug_event(struct drm_connector *connector,
				       enum drm_connector_status hpd_state)
{
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(connector->dev);
	/* Treat "connected" as HPD asserted, anything else as deasserted. */
	bool hpd_high = hpd_state == connector_status_connected;
	unsigned int hpd_pin = encoder->hpd_pin;
	bool need_work = false;

	/* Only schedule work if the OOB state for this pin actually changed. */
	spin_lock_irq(&i915->irq_lock);
	if (hpd_high != test_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state)) {
		i915->display.hotplug.event_bits |= BIT(hpd_pin);

		__assign_bit(hpd_pin, &i915->display.hotplug.oob_hotplug_last_state, hpd_high);
		need_work = true;
	}
	spin_unlock_irq(&i915->irq_lock);

	/* Queue the hotplug work outside of the irq lock. */
	if (need_work)
		queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
}
6078
/* drm_connector_funcs for both DP and eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
	.oob_hotplug_event = intel_dp_oob_hotplug_event,
};
6091
/* Probe/modeset helper vfuncs for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6098
6099enum irqreturn
6100intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
6101{
6102 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
6103 struct intel_dp *intel_dp = &dig_port->dp;
6104
6105 if (dig_port->base.type == INTEL_OUTPUT_EDP &&
6106 (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
6107 /*
6108 * vdd off can generate a long/short pulse on eDP which
6109 * would require vdd on to handle it, and thus we
6110 * would end up in an endless cycle of
6111 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
6112 */
6113 drm_dbg_kms(&i915->drm,
6114 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
6115 long_hpd ? "long" : "short",
6116 dig_port->base.base.base.id,
6117 dig_port->base.base.name);
6118 return IRQ_HANDLED;
6119 }
6120
6121 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
6122 dig_port->base.base.base.id,
6123 dig_port->base.base.name,
6124 long_hpd ? "long" : "short");
6125
6126 if (long_hpd) {
6127 intel_dp->reset_link_params = true;
6128 return IRQ_NONE;
6129 }
6130
6131 if (intel_dp->is_mst) {
6132 if (!intel_dp_check_mst_status(intel_dp))
6133 return IRQ_NONE;
6134 } else if (!intel_dp_short_pulse(intel_dp)) {
6135 return IRQ_NONE;
6136 }
6137
6138 return IRQ_HANDLED;
6139}
6140
6141static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,
6142 const struct intel_bios_encoder_data *devdata,
6143 enum port port)
6144{
6145 /*
6146 * eDP not supported on g4x. so bail out early just
6147 * for a bit extra safety in case the VBT is bonkers.
6148 */
6149 if (DISPLAY_VER(dev_priv) < 5)
6150 return false;
6151
6152 if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A)
6153 return true;
6154
6155 return devdata && intel_bios_encoder_supports_edp(devdata);
6156}
6157
6158bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port)
6159{
6160 const struct intel_bios_encoder_data *devdata =
6161 intel_bios_encoder_data_lookup(i915, port);
6162
6163 return _intel_dp_is_port_edp(i915, devdata, port);
6164}
6165
6166static bool
6167has_gamut_metadata_dip(struct intel_encoder *encoder)
6168{
6169 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
6170 enum port port = encoder->port;
6171
6172 if (intel_bios_encoder_is_lspcon(encoder->devdata))
6173 return false;
6174
6175 if (DISPLAY_VER(i915) >= 11)
6176 return true;
6177
6178 if (port == PORT_A)
6179 return false;
6180
6181 if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
6182 DISPLAY_VER(i915) >= 9)
6183 return true;
6184
6185 return false;
6186}
6187
6188static void
6189intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6190{
6191 struct drm_i915_private *dev_priv = to_i915(connector->dev);
6192 enum port port = dp_to_dig_port(intel_dp)->base.port;
6193
6194 if (!intel_dp_is_edp(intel_dp))
6195 drm_connector_attach_dp_subconnector_property(connector);
6196
6197 if (!IS_G4X(dev_priv) && port != PORT_A)
6198 intel_attach_force_audio_property(connector);
6199
6200 intel_attach_broadcast_rgb_property(connector);
6201 if (HAS_GMCH(dev_priv))
6202 drm_connector_attach_max_bpc_property(connector, 6, 10);
6203 else if (DISPLAY_VER(dev_priv) >= 5)
6204 drm_connector_attach_max_bpc_property(connector, 6, 12);
6205
6206 /* Register HDMI colorspace for case of lspcon */
6207 if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) {
6208 drm_connector_attach_content_type_property(connector);
6209 intel_attach_hdmi_colorspace_property(connector);
6210 } else {
6211 intel_attach_dp_colorspace_property(connector);
6212 }
6213
6214 if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base))
6215 drm_connector_attach_hdr_output_metadata_property(connector);
6216
6217 if (HAS_VRR(dev_priv))
6218 drm_connector_attach_vrr_capable_property(connector);
6219}
6220
6221static void
6222intel_edp_add_properties(struct intel_dp *intel_dp)
6223{
6224 struct intel_connector *connector = intel_dp->attached_connector;
6225 struct drm_i915_private *i915 = to_i915(connector->base.dev);
6226 const struct drm_display_mode *fixed_mode =
6227 intel_panel_preferred_fixed_mode(connector);
6228
6229 intel_attach_scaling_mode_property(&connector->base);
6230
6231 drm_connector_set_panel_orientation_with_quirk(&connector->base,
6232 i915->display.vbt.orientation,
6233 fixed_mode->hdisplay,
6234 fixed_mode->vdisplay);
6235}
6236
6237static void intel_edp_backlight_setup(struct intel_dp *intel_dp,
6238 struct intel_connector *connector)
6239{
6240 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
6241 enum pipe pipe = INVALID_PIPE;
6242
6243 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
6244 /*
6245 * Figure out the current pipe for the initial backlight setup.
6246 * If the current pipe isn't valid, try the PPS pipe, and if that
6247 * fails just assume pipe A.
6248 */
6249 pipe = vlv_active_pipe(intel_dp);
6250
6251 if (pipe != PIPE_A && pipe != PIPE_B)
6252 pipe = intel_dp->pps.pps_pipe;
6253
6254 if (pipe != PIPE_A && pipe != PIPE_B)
6255 pipe = PIPE_A;
6256 }
6257
6258 intel_backlight_setup(connector, pipe);
6259}
6260
/*
 * Initialize the eDP-specific parts of a connector: PPS, DPCD/EDID
 * caching, fixed panel modes, backlight and eDP properties.
 *
 * Returns true on success (or trivially for non-eDP), false when eDP
 * must be disabled on this port; on failure VDD is turned back off.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool has_dpcd;
	const struct drm_edid *drm_edid;

	/* Nothing to do for external DP ports. */
	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(&dev_priv->drm,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
				    encoder->devdata);

	if (!intel_pps_init(intel_dp)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		/*
		 * The BIOS may have still enabled VDD on the PPS even
		 * though it's unusable. Make sure we turn it back off
		 * and to release the power domain references/etc.
		 */
		goto out_vdd_off;
	}

	/*
	 * Enable HPD sense for live status check.
	 * intel_hpd_irq_setup() will turn it off again
	 * if it's no longer needed later.
	 *
	 * The DPCD probe below will make sure VDD is on.
	 */
	intel_hpd_enable_detection(encoder);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp, intel_connector);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to retrieve link info, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	/*
	 * VBT and straps are liars. Also check HPD as that seems
	 * to be the most reliable piece of information available.
	 *
	 * ... expect on devices that forgot to hook HPD up for eDP
	 * (eg. Acer Chromebook C710), so we'll check it only if multiple
	 * ports are attempting to use the same AUX CH, according to VBT.
	 */
	if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
		/*
		 * If this fails, presume the DPCD answer came
		 * from some other port using the same AUX CH.
		 *
		 * FIXME maybe cleaner to check this before the
		 * DPCD read? Would need sort out the VDD handling...
		 */
		if (!intel_digital_port_connected(encoder)) {
			drm_info(&dev_priv->drm,
				 "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
				 encoder->base.base.id, encoder->base.name);
			goto out_vdd_off;
		}

		/*
		 * Unfortunately even the HPD based detection fails on
		 * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
		 * back to checking for a VGA branch device. Only do this
		 * on known affected platforms to minimize false positives.
		 */
		if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
		    (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
		    DP_DWN_STRM_PORT_TYPE_ANALOG) {
			drm_info(&dev_priv->drm,
				 "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
				 encoder->base.base.id, encoder->base.name);
			goto out_vdd_off;
		}
	}

	/* Read the EDID under the mode_config lock; DDC first, OpRegion fallback. */
	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_edid = drm_edid_read_ddc(connector, connector->ddc);
	if (!drm_edid) {
		/* Fallback to EDID from ACPI OpRegion, if any */
		drm_edid = intel_opregion_get_edid(intel_connector);
		if (drm_edid)
			drm_dbg_kms(&dev_priv->drm,
				    "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
				    connector->base.id, connector->name);
	}
	if (drm_edid) {
		if (drm_edid_connector_update(connector, drm_edid) ||
		    !drm_edid_connector_add_modes(connector)) {
			drm_edid_connector_update(connector, NULL);
			drm_edid_free(drm_edid);
			drm_edid = ERR_PTR(-EINVAL);
		}
	} else {
		drm_edid = ERR_PTR(-ENOENT);
	}

	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
				   IS_ERR(drm_edid) ? NULL : drm_edid);

	intel_panel_add_edid_fixed_modes(intel_connector, true);

	/* MSO requires information from the EDID */
	intel_edp_mso_init(intel_dp);

	/* multiply the mode clock and horizontal timings for MSO */
	list_for_each_entry(fixed_mode, &intel_connector->panel.fixed_modes, head)
		intel_edp_mso_mode_fixup(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!intel_panel_preferred_fixed_mode(intel_connector))
		intel_panel_add_vbt_lfp_fixed_mode(intel_connector);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	/* Without any fixed mode the panel is unusable. */
	if (!intel_panel_preferred_fixed_mode(intel_connector)) {
		drm_info(&dev_priv->drm,
			 "[ENCODER:%d:%s] failed to find fixed mode for the panel, disabling eDP\n",
			 encoder->base.base.id, encoder->base.name);
		goto out_vdd_off;
	}

	intel_panel_init(intel_connector, drm_edid);

	intel_edp_backlight_setup(intel_dp, intel_connector);

	intel_edp_add_properties(intel_dp);

	intel_pps_init_late(intel_dp);

	return true;

out_vdd_off:
	intel_pps_vdd_off_sync(intel_dp);

	return false;
}
6424
6425static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6426{
6427 struct intel_connector *intel_connector;
6428 struct drm_connector *connector;
6429
6430 intel_connector = container_of(work, typeof(*intel_connector),
6431 modeset_retry_work);
6432 connector = &intel_connector->base;
6433 drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
6434 connector->name);
6435
6436 /* Grab the locks before changing connector property*/
6437 mutex_lock(&connector->dev->mode_config.mutex);
6438 /* Set connector link status to BAD and send a Uevent to notify
6439 * userspace to do a modeset.
6440 */
6441 drm_connector_set_link_status_property(connector,
6442 DRM_MODE_LINK_STATUS_BAD);
6443 mutex_unlock(&connector->dev->mode_config.mutex);
6444 /* Send Hotplug uevent so userspace can reprobe */
6445 drm_kms_helper_connector_hotplug_event(connector);
6446}
6447
/*
 * Create and initialize the DP/eDP connector for a digital port: AUX
 * channel, connector registration, eDP panel setup, MST, properties,
 * HDCP and PSR. Returns false (after cleanup) if the port is unusable.
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp->reset_link_params = true;
	intel_dp->pps.pps_pipe = INVALID_PIPE;
	intel_dp->pps.active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_default_sink_rates(intel_dp);
	intel_dp_set_default_max_sink_lane_count(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);

	/* The AUX channel doubles as the DSC decompression AUX device. */
	intel_dp_aux_init(intel_dp);
	intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init_with_ddc(dev, connector, &intel_dp_connector_funcs,
				    type, &intel_dp->aux.ddc);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
		connector->interlace_allowed = true;

	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* eDP panel setup; on failure tear down the AUX channel and connector. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		goto fail;
	}

	intel_dp_set_source_rates(intel_dp);
	intel_dp_set_common_rates(intel_dp);
	intel_dp_reset_max_link_params(intel_dp);

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_hdcp_init(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	intel_dp->frl.is_trained = false;
	intel_dp->frl.trained_rate_gbps = 0;

	intel_psr_init(intel_dp);

	return true;

fail:
	intel_display_power_flush_work(dev_priv);
	drm_connector_cleanup(connector);

	return false;
}
6562
6563void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
6564{
6565 struct intel_encoder *encoder;
6566
6567 if (!HAS_DISPLAY(dev_priv))
6568 return;
6569
6570 for_each_intel_encoder(&dev_priv->drm, encoder) {
6571 struct intel_dp *intel_dp;
6572
6573 if (encoder->type != INTEL_OUTPUT_DDI)
6574 continue;
6575
6576 intel_dp = enc_to_intel_dp(encoder);
6577
6578 if (!intel_dp_mst_source_support(intel_dp))
6579 continue;
6580
6581 if (intel_dp->is_mst)
6582 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
6583 }
6584}
6585
6586void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
6587{
6588 struct intel_encoder *encoder;
6589
6590 if (!HAS_DISPLAY(dev_priv))
6591 return;
6592
6593 for_each_intel_encoder(&dev_priv->drm, encoder) {
6594 struct intel_dp *intel_dp;
6595 int ret;
6596
6597 if (encoder->type != INTEL_OUTPUT_DDI)
6598 continue;
6599
6600 intel_dp = enc_to_intel_dp(encoder);
6601
6602 if (!intel_dp_mst_source_support(intel_dp))
6603 continue;
6604
6605 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
6606 true);
6607 if (ret) {
6608 intel_dp->is_mst = false;
6609 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6610 false);
6611 }
6612 }
6613}