1/*
2 * Copyright © 2008 Intel Corporation
3 * 2014 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
26#include <drm/drm_atomic.h>
27#include <drm/drm_atomic_helper.h>
28#include <drm/drm_edid.h>
29#include <drm/drm_fixed.h>
30#include <drm/drm_probe_helper.h>
31
32#include "i915_drv.h"
33#include "i915_reg.h"
34#include "intel_atomic.h"
35#include "intel_audio.h"
36#include "intel_connector.h"
37#include "intel_crtc.h"
38#include "intel_ddi.h"
39#include "intel_de.h"
40#include "intel_display_types.h"
41#include "intel_dp.h"
42#include "intel_dp_hdcp.h"
43#include "intel_dp_mst.h"
44#include "intel_dpio_phy.h"
45#include "intel_hdcp.h"
46#include "intel_hotplug.h"
47#include "intel_link_bw.h"
48#include "intel_psr.h"
49#include "intel_vdsc.h"
50#include "skl_scaler.h"
51
52static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
53 const struct drm_display_mode *adjusted_mode,
54 struct intel_crtc_state *crtc_state,
55 bool dsc)
56{
57 if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) {
58 int output_bpp = bpp;
59 /* DisplayPort 2 128b/132b, bits per lane is always 32 */
60 int symbol_clock = crtc_state->port_clock / 32;
61
62 if (output_bpp * adjusted_mode->crtc_clock >=
63 symbol_clock * 72) {
64 drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n",
65 output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72);
66 return -EINVAL;
67 }
68 }
69
70 return 0;
71}
72
73static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
74 const struct intel_connector *connector,
75 bool ssc, bool dsc, int bpp_x16)
76{
77 const struct drm_display_mode *adjusted_mode =
78 &crtc_state->hw.adjusted_mode;
79 unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
80 int dsc_slice_count = 0;
81 int overhead;
82
83 flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
84 flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
85 flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
86
87 if (dsc) {
88 flags |= DRM_DP_BW_OVERHEAD_DSC;
89 /* TODO: add support for bigjoiner */
90 dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
91 adjusted_mode->clock,
92 adjusted_mode->hdisplay,
93 false);
94 }
95
96 overhead = drm_dp_bw_overhead(crtc_state->lane_count,
97 adjusted_mode->hdisplay,
98 dsc_slice_count,
99 bpp_x16,
100 flags);
101
102 /*
103 * TODO: clarify whether a minimum required by the fixed FEC overhead
104 * in the bspec audio programming sequence is required here.
105 */
106 return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
107}
108
/*
 * Compute the stream's link M/N values and the TU size based on the link
 * bpp and the BW overhead on the local link.
 */
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
				     const struct intel_connector *connector,
				     int overhead,
				     int bpp_x16,
				     struct intel_link_m_n *m_n)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
	intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
			       adjusted_mode->crtc_clock,
			       crtc_state->port_clock,
			       overhead,
			       m_n);

	/* TU: the data M/N ratio scaled to the 64 slots of an MTP, rounded up. */
	m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
}
127
/* Convert the stream's effective data rate to the PBN value it occupies. */
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
	/*
	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
	 * to calculate PBN with the BW overhead passed to it.
	 */
	int data_rate = intel_dp_effective_data_rate(pixel_clock, bpp_x16,
						     bw_overhead);

	/* One PBN unit corresponds to 54/64 MBytes/sec of BW. */
	return DIV_ROUND_UP(data_rate * 64, 54 * 1000);
}
139
/*
 * Try bpp values from @max_bpp down to @min_bpp (in @step decrements)
 * until the stream's bandwidth both passes the platform constraints and
 * fits into the MST link's time slots. On success the crtc state's link
 * parameters, M/N/TU and PBN values are filled in.
 *
 * Returns the number of allocated time slots (>= 0) on success, -EDEADLK
 * if the atomic state needs to be backed off and retried, or another
 * negative error code if no bpp in the range fits.
 */
static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
						struct intel_crtc_state *crtc_state,
						int max_bpp,
						int min_bpp,
						struct link_config_limits *limits,
						struct drm_connector_state *conn_state,
						int step,
						bool dsc)
{
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_dp *intel_dp = &intel_mst->primary->dp;
	struct drm_dp_mst_topology_state *mst_state;
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int bpp, slots = -EINVAL;
	int ret = 0;

	mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* MST links are always configured at the maximum rate and width. */
	crtc_state->lane_count = limits->max_lane_count;
	crtc_state->port_clock = limits->max_rate;

	if (dsc) {
		if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
			return -EINVAL;

		/* DSC requires FEC, except on UHBR (128b/132b) links. */
		crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
	}

	mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
						      crtc_state->port_clock,
						      crtc_state->lane_count);

	drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
		    min_bpp, max_bpp);

	for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
		int local_bw_overhead;
		int remote_bw_overhead;
		int link_bpp_x16;
		int remote_tu;

		drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);

		ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
		if (ret)
			continue;

		link_bpp_x16 = to_bpp_x16(dsc ? bpp :
					  intel_dp_output_bpp(crtc_state->output_format, bpp));

		/* Local overhead excludes SSC, remote (payload) overhead includes it. */
		local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
							     false, dsc, link_bpp_x16);
		remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
							     true, dsc, link_bpp_x16);

		intel_dp_mst_compute_m_n(crtc_state, connector,
					 local_bw_overhead,
					 link_bpp_x16,
					 &crtc_state->dp_m_n);

		/*
		 * The TU size programmed to the HW determines which slots in
		 * an MTP frame are used for this stream, which needs to match
		 * the payload size programmed to the first downstream branch
		 * device's payload table.
		 *
		 * Note that atm the payload's PBN value DRM core sends via
		 * the ALLOCATE_PAYLOAD side-band message matches the payload
		 * size (which it calculates from the PBN value) it programs
		 * to the first branch device's payload table. The allocation
		 * in the payload table could be reduced though (to
		 * crtc_state->dp_m_n.tu), provided that the driver doesn't
		 * enable SSC on the corresponding link.
		 */
		crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
							link_bpp_x16,
							remote_bw_overhead);

		remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full);

		/* The remote TU must be at least the locally computed one. */
		drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
		crtc_state->dp_m_n.tu = remote_tu;

		slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
						      connector->port,
						      crtc_state->pbn);
		if (slots == -EDEADLK)
			return slots;

		if (slots >= 0) {
			drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);

			break;
		}
	}

	/* We failed to find a proper bpp/timeslots, return error */
	if (ret)
		slots = ret;

	if (slots < 0) {
		drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
			    slots);
	} else {
		if (!dsc)
			crtc_state->pipe_bpp = bpp;
		else
			crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp);
		drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
	}

	return slots;
}
260
261static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
262 struct intel_crtc_state *crtc_state,
263 struct drm_connector_state *conn_state,
264 struct link_config_limits *limits)
265{
266 int slots = -EINVAL;
267
268 /*
269 * FIXME: allocate the BW according to link_bpp, which in the case of
270 * YUV420 is only half of the pipe bpp value.
271 */
272 slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
273 to_bpp_int(limits->link.max_bpp_x16),
274 to_bpp_int(limits->link.min_bpp_x16),
275 limits,
276 conn_state, 2 * 3, false);
277
278 if (slots < 0)
279 return slots;
280
281 return 0;
282}
283
/*
 * Compute the DSC link configuration for the stream: intersect the
 * source's and sink's input bpc and compressed bpp limits, then find a
 * compressed bpp for which the stream fits into the link's time slots.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
						struct intel_crtc_state *crtc_state,
						struct drm_connector_state *conn_state,
						struct link_config_limits *limits)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int slots = -EINVAL;
	int i, num_bpc;
	u8 dsc_bpc[3] = {};
	int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
	u8 dsc_max_bpc;
	int min_compressed_bpp, max_compressed_bpp;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (DISPLAY_VER(i915) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);

	max_bpp = min_t(u8, dsc_max_bpc * 3, limits->pipe.max_bpp);
	min_bpp = limits->pipe.min_bpp;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(connector->dp.dsc_dpcd,
						       dsc_bpc);

	drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
		    min_bpp, max_bpp);

	/* Find the min/max of the bpc values supported by the sink. */
	sink_max_bpp = dsc_bpc[0] * 3;
	sink_min_bpp = sink_max_bpp;

	for (i = 1; i < num_bpc; i++) {
		if (sink_min_bpp > dsc_bpc[i] * 3)
			sink_min_bpp = dsc_bpc[i] * 3;
		if (sink_max_bpp < dsc_bpc[i] * 3)
			sink_max_bpp = dsc_bpc[i] * 3;
	}

	drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
		    sink_min_bpp, sink_max_bpp);

	/* Intersect the source's bpp range with the sink's. */
	if (min_bpp < sink_min_bpp)
		min_bpp = sink_min_bpp;

	if (max_bpp > sink_max_bpp)
		max_bpp = sink_max_bpp;

	/* Clamp the compressed bpp range to the sink's and the link's limits. */
	max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
								  crtc_state,
								  max_bpp / 3);
	max_compressed_bpp = min(max_compressed_bpp,
				 to_bpp_int(limits->link.max_bpp_x16));

	min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
	min_compressed_bpp = max(min_compressed_bpp,
				 to_bpp_int_roundup(limits->link.min_bpp_x16));

	drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
		    min_compressed_bpp, max_compressed_bpp);

	/* Align compressed bpps according to our own constraints */
	max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
							    crtc_state->pipe_bpp);
	min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
							    crtc_state->pipe_bpp);

	slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
						     min_compressed_bpp, limits,
						     conn_state, 1, true);

	if (slots < 0)
		return slots;

	return 0;
}
361static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
362 struct intel_crtc_state *crtc_state,
363 struct drm_connector_state *conn_state)
364{
365 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
366 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
367 struct intel_dp *intel_dp = &intel_mst->primary->dp;
368 struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
369 struct drm_dp_mst_topology_state *topology_state;
370 u8 link_coding_cap = intel_dp_is_uhbr(crtc_state) ?
371 DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B;
372
373 topology_state = drm_atomic_get_mst_topology_state(conn_state->state, mgr);
374 if (IS_ERR(topology_state)) {
375 drm_dbg_kms(&i915->drm, "slot update failed\n");
376 return PTR_ERR(topology_state);
377 }
378
379 drm_dp_mst_update_slots(topology_state, link_coding_cap);
380
381 return 0;
382}
383
384static bool
385intel_dp_mst_dsc_source_support(const struct intel_crtc_state *crtc_state)
386{
387 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
388
389 /*
390 * FIXME: Enabling DSC on ICL results in blank screen and FIFO pipe /
391 * transcoder underruns, re-enable DSC after fixing this issue.
392 */
393 return DISPLAY_VER(i915) >= 12 && intel_dsc_source_support(crtc_state);
394}
395
396static int mode_hblank_period_ns(const struct drm_display_mode *mode)
397{
398 return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
399 NSEC_PER_SEC / 1000),
400 mode->crtc_clock);
401}
402
403static bool
404hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
405 const struct intel_crtc_state *crtc_state)
406{
407 const struct drm_display_mode *adjusted_mode =
408 &crtc_state->hw.adjusted_mode;
409
410 if (!connector->dp.dsc_hblank_expansion_quirk)
411 return false;
412
413 if (mode_hblank_period_ns(adjusted_mode) > 300)
414 return false;
415
416 return true;
417}
418
/*
 * Adjust the link bpp limits for sinks with the DSC hblank expansion
 * quirk: such sinks need either DSC enabled or a link-rate dependent
 * minimum link bpp.
 *
 * Returns false if the limits can't be met (the caller should retry with
 * DSC or fail), true otherwise.
 */
static bool
adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
					     const struct intel_crtc_state *crtc_state,
					     struct link_config_limits *limits,
					     bool dsc)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int min_bpp_x16 = limits->link.min_bpp_x16;

	if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
		return true;

	if (!dsc) {
		/* If the source supports DSC, ask the caller to retry with it. */
		if (intel_dp_mst_dsc_source_support(crtc_state)) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
				    crtc->base.base.id, crtc->base.name,
				    connector->base.base.id, connector->base.name);
			return false;
		}

		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
			    crtc->base.base.id, crtc->base.name,
			    connector->base.base.id, connector->base.name);

		if (limits->link.max_bpp_x16 < to_bpp_x16(24))
			return false;

		limits->link.min_bpp_x16 = to_bpp_x16(24);

		return true;
	}

	drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);

	/* In DSC mode the required minimum bpp depends on the link rate. */
	if (limits->max_rate < 540000)
		min_bpp_x16 = to_bpp_x16(13);
	else if (limits->max_rate < 810000)
		min_bpp_x16 = to_bpp_x16(10);

	if (limits->link.min_bpp_x16 >= min_bpp_x16)
		return true;

	drm_dbg_kms(&i915->drm,
		    "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n",
		    crtc->base.base.id, crtc->base.name,
		    connector->base.base.id, connector->base.name,
		    BPP_X16_ARGS(min_bpp_x16));

	if (limits->link.max_bpp_x16 < min_bpp_x16)
		return false;

	limits->link.min_bpp_x16 = min_bpp_x16;

	return true;
}
477
/*
 * Fill in @limits (link rate, lane count, pipe and link bpp ranges) for
 * the stream, applying the compliance-test adjustments and the DSC
 * hblank expansion quirk.
 *
 * Returns false if no valid set of limits exists, true otherwise.
 */
static bool
intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
				   const struct intel_connector *connector,
				   struct intel_crtc_state *crtc_state,
				   bool dsc,
				   struct link_config_limits *limits)
{
	/*
	 * for MST we always configure max link bw - the spec doesn't
	 * seem to suggest we should do otherwise.
	 */
	limits->min_rate = limits->max_rate =
		intel_dp_max_link_rate(intel_dp);

	limits->min_lane_count = limits->max_lane_count =
		intel_dp_max_lane_count(intel_dp);

	limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
	/*
	 * FIXME: If all the streams can't fit into the link with
	 * their current pipe_bpp we should reduce pipe_bpp across
	 * the board until things start to fit. Until then we
	 * limit to <= 8bpc since that's what was hardcoded for all
	 * MST streams previously. This hack should be removed once
	 * we have the proper retry logic in place.
	 */
	limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);

	intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);

	if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
						     crtc_state,
						     dsc,
						     limits))
		return false;

	return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
							    crtc_state,
							    limits,
							    dsc);
}
519
/*
 * Compute the whole atomic state of an MST stream: first try the link
 * configuration without DSC, falling back to DSC if the mode doesn't fit
 * the available link BW, then derive all the dependent state (slots,
 * color range, audio, PSR, voltage level).
 */
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_dp *intel_dp = &intel_mst->primary->dp;
	const struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct link_config_limits limits;
	bool dsc_needed;
	int ret = 0;

	if (pipe_config->fec_enable &&
	    !intel_dp_supports_fec(intel_dp, connector, pipe_config))
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	/* MST streams are configured as RGB here. */
	pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->has_pch_encoder = false;

	/* DSC is needed right away if forced or if the non-DSC limits fail. */
	dsc_needed = intel_dp->force_dsc_en ||
		     !intel_dp_mst_compute_config_limits(intel_dp,
							 connector,
							 pipe_config,
							 false,
							 &limits);

	if (!dsc_needed) {
		ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
						       conn_state, &limits);

		if (ret == -EDEADLK)
			return ret;

		if (ret)
			dsc_needed = true;
	}

	/* enable compression if the mode doesn't fit available BW */
	if (dsc_needed) {
		drm_dbg_kms(&dev_priv->drm, "Try DSC (fallback=%s, force=%s)\n",
			    str_yes_no(ret),
			    str_yes_no(intel_dp->force_dsc_en));

		if (!intel_dp_mst_dsc_source_support(pipe_config))
			return -EINVAL;

		if (!intel_dp_mst_compute_config_limits(intel_dp,
							connector,
							pipe_config,
							true,
							&limits))
			return -EINVAL;

		/*
		 * FIXME: As bpc is hardcoded to 8, as mentioned above,
		 * WARN and ignore the debug flag force_dsc_bpc for now.
		 */
		drm_WARN(&dev_priv->drm, intel_dp->force_dsc_bpc, "Cannot Force BPC for MST\n");
		/*
		 * Try to get at least some timeslots and then see, if
		 * we can fit there with DSC.
		 */
		drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");

		ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
							   conn_state, &limits);
		if (ret < 0)
			return ret;

		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits,
						  pipe_config->dp_m_n.tu, false);
	}

	if (ret)
		return ret;

	ret = intel_dp_mst_update_slots(encoder, pipe_config, conn_state);
	if (ret)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		pipe_config->lane_lat_optim_mask =
			bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);

	intel_dp_audio_compute_config(encoder, pipe_config, conn_state);

	intel_ddi_compute_min_voltage_level(pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config, conn_state);

	return 0;
}
623
624/*
625 * Iterate over all connectors and return a mask of
626 * all CPU transcoders streaming over the same DP link.
627 */
628static unsigned int
629intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
630 struct intel_dp *mst_port)
631{
632 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
633 const struct intel_digital_connector_state *conn_state;
634 struct intel_connector *connector;
635 u8 transcoders = 0;
636 int i;
637
638 if (DISPLAY_VER(dev_priv) < 12)
639 return 0;
640
641 for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
642 const struct intel_crtc_state *crtc_state;
643 struct intel_crtc *crtc;
644
645 if (connector->mst_port != mst_port || !conn_state->base.crtc)
646 continue;
647
648 crtc = to_intel_crtc(conn_state->base.crtc);
649 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
650
651 if (!crtc_state->hw.active)
652 continue;
653
654 transcoders |= BIT(crtc_state->cpu_transcoder);
655 }
656
657 return transcoders;
658}
659
/*
 * Return the mask of pipes whose connectors are downstream of
 * @parent_port in @mst_mgr's topology; a NULL @parent_port selects all
 * pipes on the topology.
 */
static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
					   struct drm_dp_mst_topology_mgr *mst_mgr,
					   struct drm_dp_mst_port *parent_port)
{
	const struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	u8 mask = 0;
	int i;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (!conn_state->base.crtc)
			continue;

		/* Skip connectors on other MST topologies. */
		if (&connector->mst_port->mst_mgr != mst_mgr)
			continue;

		if (connector->port != parent_port &&
		    !drm_dp_mst_port_downstream_of_parent(mst_mgr,
							  connector->port,
							  parent_port))
			continue;

		mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
	}

	return mask;
}
687
/*
 * Check that FEC is enabled either on all or none of the pipes in the
 * MST topology; if the pipes don't match, force FEC on for all of them
 * and request a state recompute by returning -EAGAIN.
 */
static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
					 struct drm_dp_mst_topology_mgr *mst_mgr,
					 struct intel_link_bw_limits *limits)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	u8 mst_pipe_mask;
	u8 fec_pipe_mask = 0;
	int ret;

	mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);

		/* Atomic connector check should've added all the MST CRTCs. */
		if (drm_WARN_ON(&i915->drm, !crtc_state))
			return -EINVAL;

		if (crtc_state->fec_enable)
			fec_pipe_mask |= BIT(crtc->pipe);
	}

	if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
		return 0;

	limits->force_fec_pipes |= mst_pipe_mask;

	ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
						mst_pipe_mask);

	return ret ? : -EAGAIN;
}
722
723static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
724 struct drm_dp_mst_topology_mgr *mst_mgr,
725 struct drm_dp_mst_topology_state *mst_state,
726 struct intel_link_bw_limits *limits)
727{
728 struct drm_dp_mst_port *mst_port;
729 u8 mst_port_pipes;
730 int ret;
731
732 ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
733 if (ret != -ENOSPC)
734 return ret;
735
736 mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);
737
738 ret = intel_link_bw_reduce_bpp(state, limits,
739 mst_port_pipes, "MST link BW");
740
741 return ret ? : -EAGAIN;
742}
743
744/**
745 * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
746 * @state: intel atomic state
747 * @limits: link BW limits
748 *
749 * Check the link configuration for all modeset MST outputs. If the
750 * configuration is invalid @limits will be updated if possible to
751 * reduce the total BW, after which the configuration for all CRTCs in
752 * @state must be recomputed with the updated @limits.
753 *
754 * Returns:
 * - 0 if the configuration is valid
756 * - %-EAGAIN, if the configuration is invalid and @limits got updated
757 * with fallback values with which the configuration of all CRTCs in
758 * @state must be recomputed
759 * - Other negative error, if the configuration is invalid without a
760 * fallback possibility, or the check failed for another reason
761 */
762int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
763 struct intel_link_bw_limits *limits)
764{
765 struct drm_dp_mst_topology_mgr *mgr;
766 struct drm_dp_mst_topology_state *mst_state;
767 int ret;
768 int i;
769
770 for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
771 ret = intel_dp_mst_check_fec_change(state, mgr, limits);
772 if (ret)
773 return ret;
774
775 ret = intel_dp_mst_check_bw(state, mgr, mst_state,
776 limits);
777 if (ret)
778 return ret;
779 }
780
781 return 0;
782}
783
784static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
785 struct intel_crtc_state *crtc_state,
786 struct drm_connector_state *conn_state)
787{
788 struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
789 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
790 struct intel_dp *intel_dp = &intel_mst->primary->dp;
791
792 /* lowest numbered transcoder will be designated master */
793 crtc_state->mst_master_transcoder =
794 ffs(intel_dp_mst_transcoder_mask(state, intel_dp)) - 1;
795
796 return 0;
797}
798
799/*
800 * If one of the connectors in a MST stream needs a modeset, mark all CRTCs
801 * that shares the same MST stream as mode changed,
802 * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
803 * a fastset when possible.
804 *
805 * On TGL+ this is required since each stream go through a master transcoder,
806 * so if the master transcoder needs modeset, all other streams in the
807 * topology need a modeset. All platforms need to add the atomic state
808 * for all streams in the topology, since a modeset on one may require
809 * changing the MST link BW usage of the others, which in turn needs a
810 * recomputation of the corresponding CRTC states.
811 */
static int
intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
				   struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector_iter;
	int ret = 0;

	if (!intel_connector_needs_modeset(state, &connector->base))
		return 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector_iter, &connector_list_iter) {
		struct intel_digital_connector_state *conn_iter_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Only other connectors on the same MST link are affected. */
		if (connector_iter->mst_port != connector->mst_port ||
		    connector_iter == connector)
			continue;

		conn_iter_state = intel_atomic_get_digital_connector_state(state,
									   connector_iter);
		if (IS_ERR(conn_iter_state)) {
			ret = PTR_ERR(conn_iter_state);
			break;
		}

		if (!conn_iter_state->base.crtc)
			continue;

		crtc = to_intel_crtc(conn_iter_state->base.crtc);
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
		/* Force a recompute of the sibling stream's state too. */
		crtc_state->uapi.mode_changed = true;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	return ret;
}
860
861static int
862intel_dp_mst_atomic_check(struct drm_connector *connector,
863 struct drm_atomic_state *_state)
864{
865 struct intel_atomic_state *state = to_intel_atomic_state(_state);
866 struct intel_connector *intel_connector =
867 to_intel_connector(connector);
868 int ret;
869
870 ret = intel_digital_connector_atomic_check(connector, &state->base);
871 if (ret)
872 return ret;
873
874 ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
875 if (ret)
876 return ret;
877
878 return drm_dp_atomic_release_time_slots(&state->base,
879 &intel_connector->mst_port->mst_mgr,
880 intel_connector->port);
881}
882
883static void clear_act_sent(struct intel_encoder *encoder,
884 const struct intel_crtc_state *crtc_state)
885{
886 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
887
888 intel_de_write(i915, dp_tp_status_reg(encoder, crtc_state),
889 DP_TP_STATUS_ACT_SENT);
890}
891
892static void wait_for_act_sent(struct intel_encoder *encoder,
893 const struct intel_crtc_state *crtc_state)
894{
895 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
896 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
897 struct intel_dp *intel_dp = &intel_mst->primary->dp;
898
899 if (intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
900 DP_TP_STATUS_ACT_SENT, 1))
901 drm_err(&i915->drm, "Timed out waiting for ACT sent\n");
902
903 drm_dp_check_act_status(&intel_dp->mst_mgr);
904}
905
/*
 * First stage of the MST stream disable sequence: tear down HDCP and
 * sink-side decompression before the payload/transcoder teardown done in
 * intel_mst_post_disable_dp().
 */
static void intel_mst_disable_dp(struct intel_atomic_state *state,
				 struct intel_encoder *encoder,
				 const struct intel_crtc_state *old_crtc_state,
				 const struct drm_connector_state *old_conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "active links %d\n",
		    intel_dp->active_mst_links);

	intel_hdcp_disable(intel_mst->connector);

	intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
925
/*
 * Main stage of the MST stream disable sequence: remove the stream's
 * payload allocation, disable the transcoder and - for the last stream
 * on the link - the port itself. The ordering of the steps below follows
 * the HW programming sequence and must not be changed.
 */
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
				      struct intel_encoder *encoder,
				      const struct intel_crtc_state *old_crtc_state,
				      const struct drm_connector_state *old_conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	bool last_mst_stream;

	intel_dp->active_mst_links--;
	last_mst_stream = intel_dp->active_mst_links == 0;
	/* On TGL+ the master transcoder must be torn down last. */
	drm_WARN_ON(&dev_priv->drm,
		    DISPLAY_VER(dev_priv) >= 12 && last_mst_stream &&
		    !intel_dp_mst_is_master_trans(old_crtc_state));

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	/* Remove the payload from the sink's payload table. */
	drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);

	clear_act_sent(encoder, old_crtc_state);

	/* Deallocate the local VC payload and wait for the resulting ACT. */
	intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);

	wait_for_act_sent(encoder, old_crtc_state);

	drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
				    old_payload, new_payload);

	intel_ddi_disable_transcoder_func(old_crtc_state);

	intel_dsc_disable(old_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_scaler_disable(old_crtc_state);
	else
		ilk_pfit_disable(old_crtc_state);

	/*
	 * Power down mst path before disabling the port, otherwise we end
	 * up getting interrupts from the sink upon detecting link loss.
	 */
	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
				     false);

	/*
	 * BSpec 4287: disable DIP after the transcoder is disabled and before
	 * the transcoder clock select is set to none.
	 */
	intel_dp_set_infoframes(&dig_port->base, false,
				old_crtc_state, NULL);
	/*
	 * From TGL spec: "If multi-stream slave transcoder: Configure
	 * Transcoder Clock Select to direct no clock to the transcoder"
	 *
	 * From older GENs spec: "Configure Transcoder Clock Select to direct
	 * no clock to the transcoder"
	 */
	if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream)
		intel_ddi_disable_transcoder_clock(old_crtc_state);


	intel_mst->connector = NULL;
	/* The primary encoder is only torn down with the last stream. */
	if (last_mst_stream)
		dig_port->base.post_disable(state, &dig_port->base,
					    old_crtc_state, NULL);

	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
		    intel_dp->active_mst_links);
}
1010
1011static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state,
1012 struct intel_encoder *encoder,
1013 const struct intel_crtc_state *old_crtc_state,
1014 const struct drm_connector_state *old_conn_state)
1015{
1016 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1017 struct intel_digital_port *dig_port = intel_mst->primary;
1018 struct intel_dp *intel_dp = &dig_port->dp;
1019
1020 if (intel_dp->active_mst_links == 0 &&
1021 dig_port->base.post_pll_disable)
1022 dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state);
1023}
1024
1025static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state,
1026 struct intel_encoder *encoder,
1027 const struct intel_crtc_state *pipe_config,
1028 const struct drm_connector_state *conn_state)
1029{
1030 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1031 struct intel_digital_port *dig_port = intel_mst->primary;
1032 struct intel_dp *intel_dp = &dig_port->dp;
1033
1034 if (intel_dp->active_mst_links == 0)
1035 dig_port->base.pre_pll_enable(state, &dig_port->base,
1036 pipe_config, NULL);
1037 else
1038 /*
1039 * The port PLL state needs to get updated for secondary
1040 * streams as for the primary stream.
1041 */
1042 intel_ddi_update_active_dpll(state, &dig_port->base,
1043 to_intel_crtc(pipe_config->uapi.crtc));
1044}
1045
/*
 * Pre-enable hook for an MST stream: powers up the sink/port as needed,
 * runs the primary DDI pre-enable for the first stream on the port,
 * allocates the stream's MST payload (part 1) and programs DSC PPS / MSA.
 * The statement order here follows the HW enable sequence and must not be
 * changed casually.
 */
static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *pipe_config,
				    const struct drm_connector_state *conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	int ret;
	bool first_mst_stream;

	/* MST encoders are bound to a crtc, not to a connector,
	 * force the mapping here for get_hw_state.
	 */
	connector->encoder = encoder;
	intel_mst->connector = connector;
	/* Sampled before the increment below: no stream active on this port yet. */
	first_mst_stream = intel_dp->active_mst_links == 0;
	drm_WARN_ON(&dev_priv->drm,
		    DISPLAY_VER(dev_priv) >= 12 && first_mst_stream &&
		    !intel_dp_mst_is_master_trans(pipe_config));

	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
		    intel_dp->active_mst_links);

	/* Wake the whole sink only when bringing up the first stream. */
	if (first_mst_stream)
		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);

	/* Power up the downstream phy for this stream's port. */
	drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);

	intel_dp_sink_enable_decompression(state, connector, pipe_config);

	if (first_mst_stream)
		dig_port->base.pre_enable(state, &dig_port->base,
					  pipe_config, NULL);

	intel_dp->active_mst_links++;

	/* Reserve the VC payload time slots for this stream (part 1 of 2). */
	ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
				       drm_atomic_get_mst_payload_state(mst_state, connector->port));
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
			    connector->base.name, ret);

	/*
	 * Before Gen 12 this is not done as part of
	 * dig_port->base.pre_enable() and should be done here. For
	 * Gen 12+ the step in which this should be done is different for the
	 * first MST stream, so it's done on the DDI for the first stream and
	 * here for the following ones.
	 */
	if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
		intel_ddi_enable_transcoder_clock(encoder, pipe_config);

	intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
	intel_ddi_set_dp_msa(pipe_config, conn_state);
}
1107
/*
 * Enable hook for an MST stream: enables the transcoder, triggers the VC
 * payload allocation (ACT), completes the MST payload (part 2) and turns
 * the pipe on. Ordering mirrors the HW enable sequence.
 */
static void intel_mst_enable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *pipe_config,
				const struct drm_connector_state *conn_state)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
	struct intel_digital_port *dig_port = intel_mst->primary;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
	enum transcoder trans = pipe_config->cpu_transcoder;
	/* active_mst_links was already bumped in pre_enable, hence == 1 here. */
	bool first_mst_stream = intel_dp->active_mst_links == 1;

	drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);

	if (intel_dp_is_uhbr(pipe_config)) {
		/* On UHBR links the pixel clock (in Hz) must be programmed in HW. */
		const struct drm_display_mode *adjusted_mode =
			&pipe_config->hw.adjusted_mode;
		u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);

		intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
		intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
			       TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
	}

	intel_ddi_enable_transcoder_func(encoder, pipe_config);

	/* Clear any stale ACT-sent status before requesting the allocation. */
	clear_act_sent(encoder, pipe_config);

	intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
		     TRANS_DDI_DP_VC_PAYLOAD_ALLOC);

	drm_dbg_kms(&dev_priv->drm, "active links %d\n",
		    intel_dp->active_mst_links);

	wait_for_act_sent(encoder, pipe_config);

	if (first_mst_stream)
		intel_ddi_wait_for_fec_status(encoder, pipe_config, true);

	/* Complete the payload allocation towards the sink (part 2 of 2). */
	drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
				 drm_atomic_get_mst_payload_state(mst_state, connector->port));

	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
			     FECSTALL_DIS_DPTSTREAM_DPTTG,
			     pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);

	intel_audio_sdp_split_update(pipe_config);

	intel_enable_transcoder(pipe_config);

	intel_crtc_vblank_on(pipe_config);

	intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
1167
1168static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
1169 enum pipe *pipe)
1170{
1171 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1172 *pipe = intel_mst->pipe;
1173 if (intel_mst->connector)
1174 return true;
1175 return false;
1176}
1177
1178static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
1179 struct intel_crtc_state *pipe_config)
1180{
1181 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1182 struct intel_digital_port *dig_port = intel_mst->primary;
1183
1184 dig_port->base.get_config(&dig_port->base, pipe_config);
1185}
1186
1187static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
1188 struct intel_crtc_state *crtc_state)
1189{
1190 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
1191 struct intel_digital_port *dig_port = intel_mst->primary;
1192
1193 return intel_dp_initial_fastset_check(&dig_port->base, crtc_state);
1194}
1195
1196static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
1197{
1198 struct intel_connector *intel_connector = to_intel_connector(connector);
1199 struct intel_dp *intel_dp = intel_connector->mst_port;
1200 const struct drm_edid *drm_edid;
1201 int ret;
1202
1203 if (drm_connector_is_unregistered(connector))
1204 return intel_connector_update_modes(connector, NULL);
1205
1206 drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
1207
1208 ret = intel_connector_update_modes(connector, drm_edid);
1209
1210 drm_edid_free(drm_edid);
1211
1212 return ret;
1213}
1214
1215static int
1216intel_dp_mst_connector_late_register(struct drm_connector *connector)
1217{
1218 struct intel_connector *intel_connector = to_intel_connector(connector);
1219 int ret;
1220
1221 ret = drm_dp_mst_connector_late_register(connector,
1222 intel_connector->port);
1223 if (ret < 0)
1224 return ret;
1225
1226 ret = intel_connector_register(connector);
1227 if (ret < 0)
1228 drm_dp_mst_connector_early_unregister(connector,
1229 intel_connector->port);
1230
1231 return ret;
1232}
1233
1234static void
1235intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
1236{
1237 struct intel_connector *intel_connector = to_intel_connector(connector);
1238
1239 intel_connector_unregister(connector);
1240 drm_dp_mst_connector_early_unregister(connector,
1241 intel_connector->port);
1242}
1243
/* Base connector ops for MST connectors; probing goes through the helpers. */
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_mst_connector_late_register,
	.early_unregister = intel_dp_mst_connector_early_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
1254
/* .get_modes hook: MST mode enumeration is purely EDID (DDC) based. */
static int intel_dp_mst_get_modes(struct drm_connector *connector)
{
	return intel_dp_mst_get_ddc_modes(connector);
}
1259
/*
 * .mode_valid_ctx hook: filter modes that can't be carried over this MST
 * link/branch. The result is returned via *status; the int return value is
 * 0 or a locking error (e.g. -EDEADLK from drm_modeset_lock()) to be
 * retried by the probe helper. The check order is significant: cheap
 * rejections first, then checks that need the topology lock.
 */
static int
intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
			    struct drm_display_mode *mode,
			    struct drm_modeset_acquire_ctx *ctx,
			    enum drm_mode_status *status)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_dp *intel_dp = intel_connector->mst_port;
	struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr;
	struct drm_dp_mst_port *port = intel_connector->port;
	const int min_bpp = 18;	/* 6 bpc RGB, the minimum the link must carry */
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int ret;
	bool dsc = false, bigjoiner = false;
	u16 dsc_max_compressed_bpp = 0;
	u8 dsc_slice_count = 0;
	int target_clock = mode->clock;

	if (drm_connector_is_unregistered(connector)) {
		*status = MODE_ERROR;
		return 0;
	}

	*status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
	if (*status != MODE_OK)
		return 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
		*status = MODE_NO_DBLESCAN;
		return 0;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(mode->clock, min_bpp);

	/* port->full_pbn below is protected by the topology lock. */
	ret = drm_modeset_lock(&mgr->base.lock, ctx);
	if (ret)
		return ret;

	/*
	 * TODO:
	 * - Also check if compression would allow for the mode
	 * - Calculate the overhead using drm_dp_bw_overhead() /
	 *   drm_dp_bw_channel_coding_efficiency(), similarly to the
	 *   compute config code, as drm_dp_calc_pbn_mode() doesn't
	 *   account with all the overheads.
	 * - Check here and during compute config the BW reported by
	 *   DFP_Link_Available_Payload_Bandwidth_Number (or the
	 *   corresponding link capabilities of the sink) in case the
	 *   stream is uncompressed for it by the last branch device.
	 */
	if (mode_rate > max_rate || mode->clock > max_dotclk ||
	    drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	if (mode->clock < 10000) {
		*status = MODE_CLOCK_LOW;
		return 0;
	}

	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		*status = MODE_H_ILLEGAL;
		return 0;
	}

	if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
		bigjoiner = true;
		max_dotclk *= 2;

		/* TODO: add support for bigjoiner */
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	if (DISPLAY_VER(dev_priv) >= 10 &&
	    drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
		/*
		 * TBD pass the connector BPC,
		 * for now U8_MAX so that max BPC on that platform would be picked
		 */
		int pipe_bpp = intel_dp_dsc_compute_max_bpp(intel_connector, U8_MAX);

		if (drm_dp_sink_supports_fec(intel_connector->dp.fec_capability)) {
			dsc_max_compressed_bpp =
				intel_dp_dsc_get_max_compressed_bpp(dev_priv,
								    max_link_clock,
								    max_lanes,
								    target_clock,
								    mode->hdisplay,
								    bigjoiner,
								    INTEL_OUTPUT_FORMAT_RGB,
								    pipe_bpp, 64);
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_connector,
							     target_clock,
							     mode->hdisplay,
							     bigjoiner);
		}

		dsc = dsc_max_compressed_bpp && dsc_slice_count;
	}

	/*
	 * Big joiner configuration needs DSC for TGL which is not true for
	 * XE_LPD where uncompressed joiner is supported.
	 */
	if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	/*
	 * NOTE(review): mode_rate > max_rate already rejected the mode
	 * above regardless of DSC, so this looks unreachable until the
	 * TODO above (mode validation with compression) is implemented.
	 */
	if (mode_rate > max_rate && !dsc) {
		*status = MODE_CLOCK_HIGH;
		return 0;
	}

	*status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
	return 0;
}
1386
1387static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
1388 struct drm_atomic_state *state)
1389{
1390 struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
1391 connector);
1392 struct intel_connector *intel_connector = to_intel_connector(connector);
1393 struct intel_dp *intel_dp = intel_connector->mst_port;
1394 struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
1395
1396 return &intel_dp->mst_encoders[crtc->pipe]->base.base;
1397}
1398
1399static int
1400intel_dp_mst_detect(struct drm_connector *connector,
1401 struct drm_modeset_acquire_ctx *ctx, bool force)
1402{
1403 struct drm_i915_private *i915 = to_i915(connector->dev);
1404 struct intel_connector *intel_connector = to_intel_connector(connector);
1405 struct intel_dp *intel_dp = intel_connector->mst_port;
1406
1407 if (!intel_display_device_enabled(i915))
1408 return connector_status_disconnected;
1409
1410 if (drm_connector_is_unregistered(connector))
1411 return connector_status_disconnected;
1412
1413 return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr,
1414 intel_connector->port);
1415}
1416
/* Probe/atomic helper ops for MST connectors. */
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
	.get_modes = intel_dp_mst_get_modes,
	.mode_valid_ctx = intel_dp_mst_mode_valid_ctx,
	.atomic_best_encoder = intel_mst_atomic_best_encoder,
	.atomic_check = intel_dp_mst_atomic_check,
	.detect_ctx = intel_dp_mst_detect,
};
1424
/* Free a fake MST encoder; DRM-side cleanup must run before the kfree(). */
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(to_intel_encoder(encoder));

	drm_encoder_cleanup(encoder);
	kfree(intel_mst);
}
1432
/* Encoder ops for the per-pipe fake MST encoders. */
static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
	.destroy = intel_dp_mst_encoder_destroy,
};
1436
1437static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
1438{
1439 if (intel_attached_encoder(connector) && connector->base.state->crtc) {
1440 enum pipe pipe;
1441 if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
1442 return false;
1443 return true;
1444 }
1445 return false;
1446}
1447
/*
 * Attach the standard connector properties to a freshly created MST
 * connector and set its topology path. Returns the result of setting the
 * path property (0 on success, negative error code on failure).
 */
static int intel_dp_mst_add_properties(struct intel_dp *intel_dp,
				       struct drm_connector *connector,
				       const char *pathprop)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);

	drm_object_attach_property(&connector->base,
				   i915->drm.mode_config.path_property, 0);
	drm_object_attach_property(&connector->base,
				   i915->drm.mode_config.tile_property, 0);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);

	/*
	 * Reuse the prop from the SST connector because we're
	 * not allowed to create new props after device registration.
	 */
	connector->max_bpc_property =
		intel_dp->attached_connector->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	return drm_connector_set_path_property(connector, pathprop);
}
1473
/*
 * Read the DSC sink capabilities from the branch/port that will do the
 * decompression for @connector. Silently bails out if there is no
 * decompression AUX or the DPCD read fails (caps then stay unset).
 */
static void
intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
					      struct intel_connector *connector)
{
	u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];

	if (!connector->dp.dsc_decompression_aux)
		return;

	if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
		return;

	intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
}
1488
/*
 * Detect sinks that require DSC whenever HBLANK expansion is used
 * (DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC). Returns true only when
 * the quirk is listed for the device AND the port reports HBLANK
 * expansion capability; any read failure means "no quirk".
 */
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct drm_dp_desc desc;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (!connector->dp.dsc_decompression_aux)
		return false;

	/* true = this is a branch device descriptor read. */
	if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
			     &desc, true) < 0)
		return false;

	if (!drm_dp_has_quirk(&desc,
			      DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
		return false;

	if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
		return false;

	if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
		return false;

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
		    connector->base.base.id, connector->base.name);

	return true;
}
1518
/*
 * Topology manager .add_connector callback: create a connector for a new
 * MST port. Takes a malloc reference on @port for the connector's
 * lifetime; on any failure after drm_connector_init() succeeded,
 * drm_connector_cleanup() drops that reference via .destroy. Returns the
 * new connector or NULL on failure.
 */
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
							struct drm_dp_mst_port *port,
							const char *pathprop)
{
	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_connector *intel_connector;
	struct drm_connector *connector;
	enum pipe pipe;
	int ret;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		return NULL;

	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
	intel_connector->sync_state = intel_dp_connector_sync_state;
	intel_connector->mst_port = intel_dp;
	intel_connector->port = port;
	/* Keep the port allocation alive as long as this connector exists. */
	drm_dp_mst_get_port_malloc(port);

	intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
	intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
	intel_connector->dp.dsc_hblank_expansion_quirk =
		detect_dsc_hblank_expansion_quirk(intel_connector);

	connector = &intel_connector->base;
	ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		/* Init failed: undo the port ref and free manually. */
		drm_dp_mst_put_port_malloc(port);
		intel_connector_free(intel_connector);
		return NULL;
	}

	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);

	/* Any pipe's fake encoder may drive this connector. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_encoder *enc =
			&intel_dp->mst_encoders[pipe]->base.base;

		ret = drm_connector_attach_encoder(&intel_connector->base, enc);
		if (ret)
			goto err;
	}

	ret = intel_dp_mst_add_properties(intel_dp, connector, pathprop);
	if (ret)
		goto err;

	/* HDCP is optional: log and carry on if init fails. */
	ret = intel_dp_hdcp_init(dig_port, intel_connector);
	if (ret)
		drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP MST init failed, skipping.\n",
			    connector->name, connector->base.id);

	return connector;

err:
	drm_connector_cleanup(connector);
	return NULL;
}
1582
1583static void
1584intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
1585{
1586 struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
1587
1588 intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
1589}
1590
/* Callbacks handed to the DRM MST topology manager. */
static const struct drm_dp_mst_topology_cbs mst_cbs = {
	.add_connector = intel_dp_add_mst_connector,
	.poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
};
1595
1596static struct intel_dp_mst_encoder *
1597intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe)
1598{
1599 struct intel_dp_mst_encoder *intel_mst;
1600 struct intel_encoder *intel_encoder;
1601 struct drm_device *dev = dig_port->base.base.dev;
1602
1603 intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
1604
1605 if (!intel_mst)
1606 return NULL;
1607
1608 intel_mst->pipe = pipe;
1609 intel_encoder = &intel_mst->base;
1610 intel_mst->primary = dig_port;
1611
1612 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
1613 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
1614
1615 intel_encoder->type = INTEL_OUTPUT_DP_MST;
1616 intel_encoder->power_domain = dig_port->base.power_domain;
1617 intel_encoder->port = dig_port->base.port;
1618 intel_encoder->cloneable = 0;
1619 /*
1620 * This is wrong, but broken userspace uses the intersection
1621 * of possible_crtcs of all the encoders of a given connector
1622 * to figure out which crtcs can drive said connector. What
1623 * should be used instead is the union of possible_crtcs.
1624 * To keep such userspace functioning we must misconfigure
1625 * this to make sure the intersection is not empty :(
1626 */
1627 intel_encoder->pipe_mask = ~0;
1628
1629 intel_encoder->compute_config = intel_dp_mst_compute_config;
1630 intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
1631 intel_encoder->disable = intel_mst_disable_dp;
1632 intel_encoder->post_disable = intel_mst_post_disable_dp;
1633 intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
1634 intel_encoder->update_pipe = intel_ddi_update_pipe;
1635 intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
1636 intel_encoder->pre_enable = intel_mst_pre_enable_dp;
1637 intel_encoder->enable = intel_mst_enable_dp;
1638 intel_encoder->audio_enable = intel_audio_codec_enable;
1639 intel_encoder->audio_disable = intel_audio_codec_disable;
1640 intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
1641 intel_encoder->get_config = intel_dp_mst_enc_get_config;
1642 intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
1643
1644 return intel_mst;
1645
1646}
1647
1648static bool
1649intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port)
1650{
1651 struct intel_dp *intel_dp = &dig_port->dp;
1652 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1653 enum pipe pipe;
1654
1655 for_each_pipe(dev_priv, pipe)
1656 intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe);
1657 return true;
1658}
1659
/* Number of currently active MST streams on @dig_port's DP link. */
int
intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port)
{
	return dig_port->dp.active_mst_links;
}
1665
/*
 * Initialize MST support for @dig_port: create the per-pipe fake encoders
 * and the topology manager. Returns 0 when MST is unsupported on this
 * port/platform (not an error) or on success; a negative error code if
 * topology manager init fails.
 */
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;
	enum port port = dig_port->base.port;
	int ret;

	/* No MST on eDP or on platforms without MST hardware support. */
	if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
		return 0;

	/* Pre-TGL port A and pre-ICL port E cannot do MST. */
	if (DISPLAY_VER(i915) < 12 && port == PORT_A)
		return 0;

	if (DISPLAY_VER(i915) < 11 && port == PORT_E)
		return 0;

	intel_dp->mst_mgr.cbs = &mst_cbs;

	/* create encoders */
	intel_dp_create_fake_mst_encoders(dig_port);
	/* 16 = max DPCD transaction bytes, 3 = max payload streams. */
	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
					   &intel_dp->aux, 16, 3, conn_base_id);
	if (ret) {
		/* cbs == NULL marks the port as having no MST support. */
		intel_dp->mst_mgr.cbs = NULL;
		return ret;
	}

	return 0;
}
1696
1697bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
1698{
1699 return intel_dp->mst_mgr.cbs;
1700}
1701
/*
 * Tear down MST support set up by intel_dp_mst_encoder_init(). Clearing
 * cbs afterwards makes intel_dp_mst_source_support() report false.
 */
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
	struct intel_dp *intel_dp = &dig_port->dp;

	if (!intel_dp_mst_source_support(intel_dp))
		return;

	drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
	/* encoders will get killed by normal cleanup */

	intel_dp->mst_mgr.cbs = NULL;
}
1715
/* True if @crtc_state's transcoder is the MST master transcoder. */
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder == crtc_state->cpu_transcoder;
}
1720
/* True if @crtc_state is an MST stream driven by another (master) transcoder. */
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}
1726
1727/**
1728 * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
1729 * @state: atomic state
1730 * @connector: connector to add the state for
1731 * @crtc: the CRTC @connector is attached to
1732 *
1733 * Add the MST topology state for @connector to @state.
1734 *
1735 * Returns 0 on success, negative error code on failure.
1736 */
1737static int
1738intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
1739 struct intel_connector *connector,
1740 struct intel_crtc *crtc)
1741{
1742 struct drm_dp_mst_topology_state *mst_state;
1743
1744 if (!connector->mst_port)
1745 return 0;
1746
1747 mst_state = drm_atomic_get_mst_topology_state(&state->base,
1748 &connector->mst_port->mst_mgr);
1749 if (IS_ERR(mst_state))
1750 return PTR_ERR(mst_state);
1751
1752 mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
1753
1754 return 0;
1755}
1756
1757/**
1758 * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
1759 * @state: atomic state
1760 * @crtc: CRTC to add the state for
1761 *
1762 * Add the MST topology state for @crtc to @state.
1763 *
1764 * Returns 0 on success, negative error code on failure.
1765 */
1766int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
1767 struct intel_crtc *crtc)
1768{
1769 struct drm_connector *_connector;
1770 struct drm_connector_state *conn_state;
1771 int i;
1772
1773 for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
1774 struct intel_connector *connector = to_intel_connector(_connector);
1775 int ret;
1776
1777 if (conn_state->crtc != &crtc->base)
1778 continue;
1779
1780 ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
1781 if (ret)
1782 return ret;
1783 }
1784
1785 return 0;
1786}
1787
1788static struct intel_connector *
1789get_connector_in_state_for_crtc(struct intel_atomic_state *state,
1790 const struct intel_crtc *crtc)
1791{
1792 struct drm_connector_state *old_conn_state;
1793 struct drm_connector_state *new_conn_state;
1794 struct drm_connector *_connector;
1795 int i;
1796
1797 for_each_oldnew_connector_in_state(&state->base, _connector,
1798 old_conn_state, new_conn_state, i) {
1799 struct intel_connector *connector =
1800 to_intel_connector(_connector);
1801
1802 if (old_conn_state->crtc == &crtc->base ||
1803 new_conn_state->crtc == &crtc->base)
1804 return connector;
1805 }
1806
1807 return NULL;
1808}
1809
1810/**
1811 * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
1812 * @state: atomic state
1813 * @crtc: CRTC for which to check the modeset requirement
1814 *
1815 * Check if any change in a MST topology requires a forced modeset on @crtc in
1816 * this topology. One such change is enabling/disabling the DSC decompression
1817 * state in the first branch device's UFP DPCD as required by one CRTC, while
1818 * the other @crtc in the same topology is still active, requiring a full modeset
1819 * on @crtc.
1820 */
1821bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
1822 struct intel_crtc *crtc)
1823{
1824 const struct intel_connector *crtc_connector;
1825 const struct drm_connector_state *conn_state;
1826 const struct drm_connector *_connector;
1827 int i;
1828
1829 if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
1830 INTEL_OUTPUT_DP_MST))
1831 return false;
1832
1833 crtc_connector = get_connector_in_state_for_crtc(state, crtc);
1834
1835 if (!crtc_connector)
1836 /* None of the connectors in the topology needs modeset */
1837 return false;
1838
1839 for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
1840 const struct intel_connector *connector =
1841 to_intel_connector(_connector);
1842 const struct intel_crtc_state *new_crtc_state;
1843 const struct intel_crtc_state *old_crtc_state;
1844 struct intel_crtc *crtc_iter;
1845
1846 if (connector->mst_port != crtc_connector->mst_port ||
1847 !conn_state->crtc)
1848 continue;
1849
1850 crtc_iter = to_intel_crtc(conn_state->crtc);
1851
1852 new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
1853 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
1854
1855 if (!intel_crtc_needs_modeset(new_crtc_state))
1856 continue;
1857
1858 if (old_crtc_state->dsc.compression_enable ==
1859 new_crtc_state->dsc.compression_enable)
1860 continue;
1861 /*
1862 * Toggling the decompression flag because of this stream in
1863 * the first downstream branch device's UFP DPCD may reset the
1864 * whole branch device. To avoid the reset while other streams
1865 * are also active modeset the whole MST topology in this
1866 * case.
1867 */
1868 if (connector->dp.dsc_decompression_aux ==
1869 &connector->mst_port->aux)
1870 return true;
1871 }
1872
1873 return false;
1874}