1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include "i915_drv.h"
7#include "intel_display.h"
8#include "intel_display_types.h"
9#include "intel_dp_mst.h"
10#include "intel_tc.h"
11
/* Return a human-readable name for a TypeC port mode, for debug output. */
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	/* Report out-of-range modes as TBT-alt instead of overrunning names[]. */
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_TBT_ALT;

	return names[mode];
}
25
/*
 * Return the power domain that must be held to keep the port's PHY out of
 * the TC-cold power state: the legacy AUX domain where AUX power blocks
 * TC-cold, the dedicated TC_COLD_OFF domain otherwise.
 */
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
	if (intel_tc_cold_requires_aux_pw(dig_port))
		return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
	else
		return POWER_DOMAIN_TC_COLD_OFF;
}
34
/*
 * Block the PHY's TC-cold power state by acquiring the corresponding power
 * domain. Returns the wakeref to pass to tc_cold_unblock(), or 0 when no
 * blocking is required (non-legacy ports on display version 11).
 */
static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum intel_display_power_domain domain;

	/* On ICL only legacy ports need TC-cold blocked explicitly. */
	if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
		return 0;

	domain = tc_cold_get_power_domain(dig_port);
	return intel_display_power_get(i915, domain);
}
47
/* Release the TC-cold blocking power reference taken by tc_cold_block(). */
static void
tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum intel_display_power_domain domain;

	/*
	 * wakeref == -1 means some error happened saving save_depot_stack, but
	 * power should still be put down; 0 is an invalid save_depot_stack
	 * id, so it can be used to skip the put for non-TC legacy ports.
	 */
	if (wakeref == 0)
		return;

	domain = tc_cold_get_power_domain(dig_port);
	intel_display_power_put_async(i915, domain, wakeref);
}
65
/*
 * WARN if the power domain keeping the PHY out of TC-cold is not enabled;
 * callers of the FIA register accessors below depend on it being held.
 */
static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool enabled;

	/* Mirrors tc_cold_block(): nothing to assert for ICL non-legacy ports. */
	if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
		return;

	enabled = intel_display_power_is_enabled(i915,
						 tc_cold_get_power_domain(dig_port));
	drm_WARN_ON(&i915->drm, !enabled);
}
79
/*
 * Read the DP lane assignment mask for the port from the FIA DFLEXDPSP
 * register, shifted down to the port's own 4-bit field.
 * An all-ones readback indicates the PHY is in TC-cold (register gone).
 */
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
95
/*
 * Read the USB-C pin assignment field for the port from the FIA DFLEXPA1
 * register. All-ones readback indicates the PHY is in TC-cold.
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 pin_mask;

	pin_mask = intel_uncore_read(uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
111
/*
 * Return the maximum lane count usable by the port. Only DP-alt mode is
 * limited by the FIA lane assignment; TBT-alt and legacy modes always have
 * all 4 lanes available.
 */
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(dig_port);

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	/* Map the per-lane assignment bitmask to a usable lane count. */
	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
143
/*
 * Program the FIA with the number of main-link lanes the display engine
 * will use, selecting the lane group matching any lane reversal. Lane
 * reversal is only valid in legacy mode (enforced by the WARN below).
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(dig_port);

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}
182
/*
 * The VBT marks a port as legacy or not; the HPD live status tells which
 * modes are actually possible. If they disagree, trust the hardware and
 * flip the cached legacy flag.
 */
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	/* Nothing to fix if the live status agrees with the flag. */
	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
205
/*
 * ICL/TGL: return a bitmask of TC_PORT_* modes with an active HPD live
 * status, combining the FIA TBT/TC live-state bits with the south display
 * HPD ISR bit (legacy). All-ones FIA readback means the PHY is in TC-cold.
 */
static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}
238
239static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
240{
241 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
242 enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
243 u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
244 struct intel_uncore *uncore = &i915->uncore;
245 u32 val, mask = 0;
246
247 val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
248 if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
249 mask |= BIT(TC_PORT_DP_ALT);
250 if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
251 mask |= BIT(TC_PORT_TBT_ALT);
252
253 if (intel_uncore_read(uncore, SDEISR) & isr_bit)
254 mask |= BIT(TC_PORT_LEGACY);
255
256 /* The sink can be connected only in a single mode. */
257 if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
258 tc_port_fixup_legacy_flag(dig_port, mask);
259
260 return mask;
261}
262
263static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
264{
265 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
266
267 if (IS_ALDERLAKE_P(i915))
268 return adl_tc_port_live_status_mask(dig_port);
269
270 return icl_tc_port_live_status_mask(dig_port);
271}
272
/*
 * ICL/TGL: return whether the PHY has completed its connect handshake,
 * read from the FIA DFLEXDPPMS register. All-ones readback means the PHY
 * is in TC-cold, which is reported as "not complete".
 */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
290
291static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
292{
293 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
294 struct intel_uncore *uncore = &i915->uncore;
295 u32 val;
296
297 val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
298 if (val == 0xffffffff) {
299 drm_dbg_kms(&i915->drm,
300 "Port %s: PHY in TCCOLD, assuming not complete\n",
301 dig_port->tc_port_name);
302 return false;
303 }
304
305 return val & TCSS_DDI_STATUS_READY;
306}
307
308static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
309{
310 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
311
312 if (IS_ALDERLAKE_P(i915))
313 return adl_tc_phy_status_complete(dig_port);
314
315 return icl_tc_phy_status_complete(dig_port);
316}
317
/*
 * ICL/TGL: take or release display ownership of the PHY by toggling the
 * "not safe" bit in the FIA DFLEXDPCSSS register. Returns false if the
 * PHY is in TC-cold (all-ones readback) and ownership can't be changed.
 */
static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    dig_port->tc_port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	/* On release, wait for the status-complete bit to clear (best effort). */
	if (!take && wait_for(!tc_phy_status_complete(dig_port), 10))
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY complete clear timed out\n",
			    dig_port->tc_port_name);

	return true;
}
349
350static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
351 bool take)
352{
353 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
354 struct intel_uncore *uncore = &i915->uncore;
355 enum port port = dig_port->base.port;
356 u32 val;
357
358 val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
359 if (take)
360 val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
361 else
362 val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
363 intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
364
365 return true;
366}
367
368static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
369{
370 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
371
372 if (IS_ALDERLAKE_P(i915))
373 return adl_tc_phy_take_ownership(dig_port, take);
374
375 return icl_tc_phy_take_ownership(dig_port, take);
376}
377
/*
 * ICL/TGL: return whether the display currently owns the PHY, read from
 * the "not safe" bit in the FIA DFLEXDPCSSS register. An all-ones
 * readback (PHY in TC-cold) is optimistically reported as owned.
 */
static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume safe mode\n",
			    dig_port->tc_port_name);
		return true;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
}
395
396static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
397{
398 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
399 struct intel_uncore *uncore = &i915->uncore;
400 enum port port = dig_port->base.port;
401 u32 val;
402
403 val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
404 return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
405}
406
407static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
408{
409 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
410
411 if (IS_ALDERLAKE_P(i915))
412 return adl_tc_phy_is_owned(dig_port);
413
414 return icl_tc_phy_is_owned(dig_port);
415}
416
417/*
418 * This function implements the first part of the Connect Flow described by our
419 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
420 * lanes, EDID, etc) is done as needed in the typical places.
421 *
422 * Unlike the other ports, type-C ports are not available to use as soon as we
423 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
424 * display, USB, etc. As a result, handshaking through FIA is required around
425 * connect and disconnect to cleanly transfer ownership with the controller and
426 * set the type-C power state.
427 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	int max_lanes;

	/* PHY not ready: fall back to TBT-alt (tunnelled) mode. */
	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	/* Legacy ports are expected to always be able to take ownership. */
	if (!tc_phy_take_ownership(dig_port, true) &&
	    !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		/* Legacy mode keeps ownership permanently; expects all 4 lanes. */
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    dig_port->tc_port_name);
		goto out_release_phy;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    dig_port->tc_port_name,
			    max_lanes, required_lanes);
		goto out_release_phy;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_release_phy:
	tc_phy_take_ownership(dig_port, false);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}
479
480/*
481 * See the comment at the connect function. This implements the Disconnect
482 * Flow.
483 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
		/* Nothing to do, we never disconnect from legacy mode */
		break;
	case TC_PORT_DP_ALT:
		/* Release PHY ownership and fall back to TBT-alt mode. */
		tc_phy_take_ownership(dig_port, false);
		dig_port->tc_mode = TC_PORT_TBT_ALT;
		break;
	case TC_PORT_TBT_ALT:
		/* Nothing to do, we stay in TBT-alt mode */
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}
501
/*
 * Return whether the PHY state is consistent with the current TC mode:
 * TBT-alt needs no ready/owned PHY, while DP-alt and legacy modes require
 * both a completed handshake and display ownership.
 */
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
			    dig_port->tc_port_name);
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	if (!tc_phy_is_owned(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
			    dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}
522
/*
 * Derive the TC mode the hardware is currently in from the PHY ownership,
 * ready state and HPD live status; used to sanitize the driver's cached
 * mode during init/resume.
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	enum tc_port_mode mode;

	/* Without ownership (or with an unready PHY) only TBT-alt is possible. */
	if (!tc_phy_is_owned(dig_port) ||
	    drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		/* fls() maps the highest set BIT(TC_PORT_*) back to its mode. */
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}
544
/*
 * Return the TC mode the port should be switched to, based on the HPD
 * live status; with nothing live, a ready legacy PHY targets legacy mode
 * and everything else targets TBT-alt.
 */
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	/* fls() maps the highest set BIT(TC_PORT_*) back to its mode. */
	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return tc_phy_status_complete(dig_port) &&
	       dig_port->tc_legacy_port ? TC_PORT_LEGACY :
					  TC_PORT_TBT_ALT;
}
557
/*
 * Disconnect the PHY and reconnect it, re-detecting the TC mode in the
 * process. Caller holds the TC lock and has TC-cold blocked.
 */
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/*
		 * The AUX power well must be off across the mode reset,
		 * since AUX powered state would block the reset.
		 */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	icl_tc_phy_disconnect(dig_port);
	icl_tc_phy_connect(dig_port, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(dig_port->tc_mode));
}
582
/*
 * Initialize the link refcount from the number of active links found
 * during HW state readout; WARNs if a refcount was already set.
 */
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}
592
/*
 * Sanitize the port's TC mode and link refcount during driver init /
 * resume, based on the HW state left by BIOS or a previous driver
 * instance. Connects legacy ports with no active links so they hold PHY
 * ownership from here on.
 */
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_encoder *encoder = &dig_port->base;
	intel_wakeref_t tc_cold_wref;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);
	tc_cold_wref = tc_cold_block(dig_port);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	if (active_links) {
		/* Keep the current mode; just warn if the PHY looks disconnected. */
		if (!icl_tc_phy_is_connected(dig_port))
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY disconnected with %d active link(s)\n",
				    dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		goto out;
	}

	if (dig_port->tc_legacy_port)
		icl_tc_phy_connect(dig_port, 1);

out:
	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(dig_port->tc_mode));

	tc_cold_unblock(dig_port, tc_cold_wref);
	mutex_unlock(&dig_port->tc_lock);
}
630
/* Return whether the cached TC mode differs from the HW's target mode. */
static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}
635
636/*
637 * The type-C ports are different because even when they are connected, they may
638 * not be available/usable by the graphics driver: see the comment on
639 * icl_tc_phy_connect(). So in our driver instead of adding the additional
640 * concept of "usable" and make everything check for "connected and usable" we
641 * define a port as "connected" when it is not only connected, but also when it
642 * is usable by the rest of the driver. That maintains the old assumption that
643 * connected ports are usable, and avoids exposing to the users objects they
644 * can't really use.
645 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected;
	intel_wakeref_t tc_cold_wref;

	intel_tc_port_lock(dig_port);
	tc_cold_wref = tc_cold_block(dig_port);

	/* Connected only if the HPD live status matches the current mode. */
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);

	tc_cold_unblock(dig_port, tc_cold_wref);
	intel_tc_port_unlock(dig_port);

	return is_connected;
}
663
/*
 * Lock the TC port, resetting its mode first if the HW target mode has
 * changed and no link currently holds a reference. Also takes a
 * DISPLAY_CORE wakeref kept until intel_tc_port_unlock().
 */
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);

	mutex_lock(&dig_port->tc_lock);

	/* Only reset the mode while no active link depends on it. */
	if (!dig_port->tc_link_refcount) {
		intel_wakeref_t tc_cold_wref;

		tc_cold_wref = tc_cold_block(dig_port);

		if (intel_tc_port_needs_reset(dig_port))
			intel_tc_port_reset_mode(dig_port, required_lanes);

		tc_cold_unblock(dig_port, tc_cold_wref);
	}

	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
	dig_port->tc_lock_wakeref = wakeref;
}
688
/* Lock the TC port, assuming a single required lane for any mode reset. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}
693
/* Unlock the TC port and drop the wakeref taken by __intel_tc_port_lock(). */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

	mutex_unlock(&dig_port->tc_lock);

	intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
				      wakeref);
}
704
/* Return whether the port's TC mode is currently pinned (lock or link ref). */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}
710
/*
 * Pin the port's TC mode for an enabled link, locking first so the mode
 * can still be reset if needed before the refcount is taken.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}
718
/* Drop a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	mutex_lock(&dig_port->tc_lock);
	dig_port->tc_link_refcount--;
	mutex_unlock(&dig_port->tc_lock);
}
725
/*
 * Return whether the SoC uses modular FIA instances; read from the
 * MODULAR_FIA bit in FIA1's DFLEXDPSP register with TC-cold blocked.
 */
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	intel_wakeref_t wakeref;
	u32 val;

	if (!INTEL_INFO(i915)->display.has_modular_fia)
		return false;

	mutex_lock(&dig_port->tc_lock);
	wakeref = tc_cold_block(dig_port);
	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
	tc_cold_unblock(dig_port, wakeref);
	mutex_unlock(&dig_port->tc_lock);

	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	return val & MODULAR_FIA_MASK;
}
745
/*
 * Compute which FIA instance and which line within it serve this port.
 */
static void
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (tc_has_modular_fia(i915, dig_port)) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}
764
/*
 * One-time init of the TC port state: debug name, lock, legacy flag from
 * the VBT and the FIA parameters.
 */
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}
782
/*
 * Return whether blocking the PHY's TC-cold state requires holding the
 * port's AUX power well rather than the TC_COLD_OFF domain.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
	       IS_ALDERLAKE_P(i915);
}
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include "i915_drv.h"
7#include "i915_reg.h"
8#include "intel_atomic.h"
9#include "intel_cx0_phy_regs.h"
10#include "intel_ddi.h"
11#include "intel_de.h"
12#include "intel_display.h"
13#include "intel_display_driver.h"
14#include "intel_display_power_map.h"
15#include "intel_display_types.h"
16#include "intel_dkl_phy_regs.h"
17#include "intel_dp.h"
18#include "intel_dp_mst.h"
19#include "intel_mg_phy_regs.h"
20#include "intel_modeset_lock.h"
21#include "intel_tc.h"
22
23#define DP_PIN_ASSIGNMENT_C 0x3
24#define DP_PIN_ASSIGNMENT_D 0x4
25#define DP_PIN_ASSIGNMENT_E 0x5
26
/* Connection modes a TypeC port/PHY can be in; see tc_port_mode_name(). */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,
	TC_PORT_TBT_ALT,
	TC_PORT_DP_ALT,
	TC_PORT_LEGACY,
};
33
34struct intel_tc_port;
35
/* Per-platform TypeC PHY hooks; tc_phy_*() wrappers dispatch through these. */
struct intel_tc_phy_ops {
	/* Power domain blocking the PHY's TC-cold state. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with an active HPD. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	bool (*is_ready)(struct intel_tc_port *tc);
	bool (*is_owned)(struct intel_tc_port *tc);
	void (*get_hw_state)(struct intel_tc_port *tc);
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	void (*disconnect)(struct intel_tc_port *tc);
	void (*init)(struct intel_tc_port *tc);
};
46
/* Driver state for one TypeC port, owned by its intel_digital_port. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock;	/* protects the TypeC port mode */
	intel_wakeref_t lock_wakeref;	/* wakeref held while locked */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	int link_refcount;	/* pins the mode while links are active */
	bool legacy_port:1;	/* static (non-USB-C) connector per VBT */
	const char *port_name;
	enum tc_port_mode mode;
	enum tc_port_mode init_mode;	/* mode found at driver init */
	enum phy_fia phy_fia;	/* FIA instance serving this port */
	u8 phy_fia_idx;		/* line index within the FIA instance */
};
67
68static enum intel_display_power_domain
69tc_phy_cold_off_domain(struct intel_tc_port *);
70static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
71static bool tc_phy_is_ready(struct intel_tc_port *tc);
72static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
73static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
74
/* Return a human-readable name for a TypeC port mode, for debug output. */
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_DISCONNECTED] = "disconnected",
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	/* Report out-of-range modes as disconnected instead of overrunning names[]. */
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_DISCONNECTED;

	return names[mode];
}
89
/* Return the TC port state attached to a digital port. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
94
/* Return the i915 device owning a TC port. */
static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
{
	return to_i915(tc->dig_port->base.base.dev);
}
99
/* Return whether dig_port is a TypeC port currently in the given mode. */
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
				  enum tc_port_mode mode)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return intel_phy_is_tc(i915, phy) && tc->mode == mode;
}
109
/* Return whether dig_port is a TypeC port in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
114
/* Return whether dig_port is a TypeC port in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
119
/* Return whether dig_port is a TypeC port in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
124
125/*
126 * The display power domains used for TC ports depending on the
127 * platform and TC mode (legacy, DP-alt, TBT):
128 *
129 * POWER_DOMAIN_DISPLAY_CORE:
130 * --------------------------
131 * ADLP/all modes:
132 * - TCSS/IOM access for PHY ready state.
133 * ADLP+/all modes:
134 * - DE/north-,south-HPD ISR access for HPD live state.
135 *
136 * POWER_DOMAIN_PORT_DDI_LANES_<port>:
137 * -----------------------------------
138 * ICL+/all modes:
139 * - DE/DDI_BUF access for port enabled state.
140 * ADLP/all modes:
141 * - DE/DDI_BUF access for PHY owned state.
142 *
143 * POWER_DOMAIN_AUX_USBC<TC port index>:
144 * -------------------------------------
145 * ICL/legacy mode:
146 * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
147 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
148 * main lanes.
149 * ADLP/legacy, DP-alt modes:
150 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
151 * main lanes.
152 *
153 * POWER_DOMAIN_TC_COLD_OFF:
154 * -------------------------
155 * ICL/DP-alt, TBT mode:
156 * - TCSS/TBT: block TC-cold power state for using the (direct or
157 * TBT DP-IN) AUX and main lanes.
158 *
159 * TGL/all modes:
160 * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
161 * - TCSS/PHY: block TC-cold power state for using the (direct or
162 * TBT DP-IN) AUX and main lanes.
163 *
164 * ADLP/TBT mode:
165 * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
166 * AUX and main lanes.
167 *
168 * XELPDP+/all modes:
169 * - TCSS/IOM,FIA access for PHY ready, owned state
170 * - TCSS/PHY: block TC-cold power state for using the (direct or
171 * TBT DP-IN) AUX and main lanes.
172 */
/*
 * Return whether blocking the PHY's TC-cold state requires holding the
 * port's legacy AUX power domain (vs. TC_COLD_OFF).
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return tc_phy_cold_off_domain(tc) ==
	       intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
181
/*
 * Block the PHY's TC-cold state, returning the wakeref and the domain
 * (via *domain) to pass to __tc_cold_unblock().
 */
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(i915, *domain);
}
191
/*
 * Block TC-cold, recording the acquired domain for the runtime-PM debug
 * cross-check done in tc_cold_unblock().
 */
static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}
204
/* Release the TC-cold blocking reference taken by __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	intel_display_power_put(i915, domain, wakeref);
}
213
/*
 * Release the TC-cold blocking reference, verifying (in debug builds)
 * that the cold-off domain hasn't changed since tc_cold_block().
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
224
/* WARN unless the DISPLAY_CORE power domain is currently enabled. */
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
}
233
/* WARN unless the PHY's TC-cold state is currently blocked. */
static void
assert_tc_cold_blocked(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	bool enabled;

	enabled = intel_display_power_is_enabled(i915,
						 tc_phy_cold_off_domain(tc));
	drm_WARN_ON(&i915->drm, !enabled);
}
244
/* Return the DDI-lanes power domain for this TC port. */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);

	/* The TC1..TCn lane domains are consecutive, starting at TC1. */
	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
253
/* WARN unless the port's DDI-lanes power domain is currently enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
}
262
263static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
264{
265 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
266 struct intel_tc_port *tc = to_tc_port(dig_port);
267 u32 lane_mask;
268
269 lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
270
271 drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
272 assert_tc_cold_blocked(tc);
273
274 lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
275 return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
276}
277
278u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
279{
280 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
281 struct intel_tc_port *tc = to_tc_port(dig_port);
282 u32 pin_mask;
283
284 pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
285
286 drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
287 assert_tc_cold_blocked(tc);
288
289 return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
290 DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
291}
292
/*
 * Return the max lane count for a LNL+ (display ver >= 20) TC port, derived
 * from the pin assignment field of TCSS_DDI_STATUS.
 */
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	intel_wakeref_t wakeref;
	u32 val, pin_assignment;

	/* Reading the register requires the display core power domain. */
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));

	pin_assignment =
		REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);

	switch (pin_assignment) {
	default:
		/* Unexpected values fall back to the 2-lane case. */
		MISSING_CASE(pin_assignment);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
317
/*
 * Return the max lane count for an MTL (display ver 14..19) TC port, derived
 * from the FIA pin assignment mask.
 */
static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 pin_mask;

	/* Reading the FIA register requires the display core power domain. */
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);

	switch (pin_mask) {
	default:
		/* Unexpected values fall back to the 2-lane case. */
		MISSING_CASE(pin_mask);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
338
/*
 * Return the max lane count for a pre-MTL TC port, derived from the FIA
 * lane assignment mask (one bit per assigned lane).
 */
static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask = 0;

	/* Reading the FIA register requires the display core power domain. */
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		/* Unexpected masks fall back to the single-lane case. */
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
364
365int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
366{
367 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
368 struct intel_tc_port *tc = to_tc_port(dig_port);
369 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
370
371 if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
372 return 4;
373
374 assert_tc_cold_blocked(tc);
375
376 if (DISPLAY_VER(i915) >= 20)
377 return lnl_tc_port_get_max_lane_count(dig_port);
378
379 if (DISPLAY_VER(i915) >= 14)
380 return mtl_tc_port_get_max_lane_count(dig_port);
381
382 return intel_tc_port_get_max_lane_count(dig_port);
383}
384
/*
 * Program the FIA with the number of lanes the display will use, selecting
 * the lane group based on the lane-reversal strap. Lane reversal is only
 * expected on legacy-mode ports.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		/* Reversed ports use the top lane(s) instead of the bottom. */
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
421
/*
 * Sanitize the port's legacy flag based on the live HPD status: if exactly
 * one mode is live and it contradicts the flag, flip the flag, trusting the
 * hardware over the VBT.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 valid_hpd_mask;

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only a single live mode gives an unambiguous signal. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	/* Nothing to do if the live status agrees with the flag. */
	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
449
450static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
451{
452 struct drm_i915_private *i915 = tc_to_i915(tc);
453 enum port port = tc->dig_port->base.port;
454 enum tc_port tc_port = intel_port_to_tc(i915, port);
455
456 /*
457 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
458 * than two TC ports, there are multiple instances of Modular FIA.
459 */
460 if (modular_fia) {
461 tc->phy_fia = tc_port / 2;
462 tc->phy_fia_idx = tc_port % 2;
463 } else {
464 tc->phy_fia = FIA1;
465 tc->phy_fia_idx = tc_port;
466 }
467}
468
469/*
470 * ICL TC PHY handlers
471 * -------------------
472 */
473static enum intel_display_power_domain
474icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
475{
476 struct drm_i915_private *i915 = tc_to_i915(tc);
477 struct intel_digital_port *dig_port = tc->dig_port;
478
479 if (tc->legacy_port)
480 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
481
482 return POWER_DOMAIN_TC_COLD_OFF;
483}
484
/*
 * Return the live HPD status mask (BIT(TC_PORT_*) per mode) for an ICL TC
 * port, combining the FIA live-state bits with the PCH legacy HPD bit.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* Block TC-cold around the register reads. */
	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(i915, SDEISR);
	}

	/* An all-ones readout indicates the PHY is in TC-cold. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
517
518/*
519 * Return the PHY status complete flag indicating that display can acquire the
520 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
521 * is connected and it's ready to switch the ownership to display. The flag
522 * will be left cleared when a TBT-alt sink is connected, where the PHY is
523 * owned by the TBT subsystem and so switching the ownership to display is not
524 * required.
525 */
/* Return the ICL PHY status-complete flag; see the comment above. */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* An all-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
543
/*
 * Take or release PHY ownership via the FIA "not safe" bit. Returns false
 * if the PHY is in TC-cold and the bit can't be updated.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* An all-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
569
/* Return whether display owns the PHY, per the FIA "not safe" bit. */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* An all-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
587
/*
 * Read out the PHY's current mode, blocking TC-cold for the duration of the
 * readout and - if the port turns out to be connected - keeping it blocked
 * afterwards via tc->lock_wakeref.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	/* Drop the temporary readout reference; lock_wakeref persists. */
	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
601
602/*
603 * This function implements the first part of the Connect Flow described by our
604 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
605 * lanes, EDID, etc) is done as needed in the typical places.
606 *
607 * Unlike the other ports, type-C ports are not available to use as soon as we
608 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
609 * display, USB, etc. As a result, handshaking through FIA is required around
610 * connect and disconnect to cleanly transfer ownership with the controller and
611 * set the type-C power state.
612 */
/*
 * Verify that a port in legacy or DP-alt mode is still usable: in DP-alt
 * mode re-check the live status (the sink may have just disconnected) and
 * that the PHY provides enough lanes. Returns false if the connect attempt
 * must be aborted.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		/* Legacy ports are expected to always have all 4 lanes. */
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
648
/*
 * Connect the ICL PHY in the port's current mode: block TC-cold and, for
 * legacy/DP-alt modes, take PHY ownership and verify the mode is usable.
 * On failure all acquired state is released via the goto ladder.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	tc->lock_wakeref = tc_cold_block(tc);

	/* In TBT-alt mode the PHY stays owned by the TBT subsystem. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	/* Failing to take ownership is unexpected on legacy ports. */
	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}


	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
681
682/*
683 * See the comment at the connect function. This implements the Disconnect
684 * Flow.
685 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Owned modes must release the PHY before unblocking. */
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
700
/* ICL has no modular FIA. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
705
/* PHY ops for ICL (gen11) TC ports. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
716
717/*
718 * TGL TC PHY handlers
719 * -------------------
720 */
/* On TGL TC-cold is blocked by the dedicated TC-cold-off domain. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
726
/* Detect modular FIA from the hardware and set up the FIA parameters. */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

	/* An all-ones readout indicates the PHY is in TC-cold. */
	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
740
/* PHY ops for TGL TC ports; reuses the ICL handlers except cold-off/init. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
751
752/*
753 * ADLP TC PHY handlers
754 * --------------------
755 */
756static enum intel_display_power_domain
757adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
758{
759 struct drm_i915_private *i915 = tc_to_i915(tc);
760 struct intel_digital_port *dig_port = tc->dig_port;
761
762 if (tc->mode != TC_PORT_TBT_ALT)
763 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
764
765 return POWER_DOMAIN_TC_COLD_OFF;
766}
767
/*
 * Return the live HPD status mask for an ADLP TC port, combining the CPU
 * DE HPD bits (DP-alt/TBT-alt) with the PCH HPD bit (legacy).
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
795
796/*
797 * Return the PHY status complete flag indicating that display can acquire the
798 * PHY ownership. The IOM firmware sets this flag when it's ready to switch
799 * the ownership to display, regardless of what sink is connected (TBT-alt,
800 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
801 * subsystem and so switching the ownership to display is not required.
802 */
/* Return the ADLP PHY ready flag from TCSS_DDI_STATUS; see comment above. */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
	u32 val;

	assert_display_core_power_enabled(tc);

	val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
	/* An all-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
821
/*
 * Take or release PHY ownership via DDI_BUF_CTL. Always succeeds on ADLP,
 * hence the unconditional true return.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
835
836static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
837{
838 struct drm_i915_private *i915 = tc_to_i915(tc);
839 enum port port = tc->dig_port->base.port;
840 u32 val;
841
842 assert_tc_port_power_enabled(tc);
843
844 val = intel_de_read(i915, DDI_BUF_CTL(port));
845 return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
846}
847
/*
 * Read out the PHY's current mode, holding the port's DDI-lanes power
 * domain for the readout and - if the port turns out to be connected -
 * keeping TC-cold blocked afterwards via tc->lock_wakeref.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
863
/*
 * Connect the ADLP PHY in the port's current mode. Legacy/DP-alt modes
 * take PHY ownership and block TC-cold while holding the port's DDI-lanes
 * power domain; the domain is dropped again before returning. On failure
 * all acquired state is released via the goto ladder.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	/* In TBT-alt mode only TC-cold blocking is needed. */
	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	/* Failing to take ownership is unexpected on legacy ports. */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return false;
}
910
/*
 * Disconnect the ADLP PHY: unblock TC-cold and, for owned modes, release
 * PHY ownership, all while holding the port's DDI-lanes power domain.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
935
/* ADLP always uses a modular FIA. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
940
/* PHY ops for ADLP TC ports. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
951
952/*
953 * XELPDP TC PHY handlers
954 * ----------------------
955 */
/*
 * Return the live HPD status mask for a XELPDP TC port, combining the PICA
 * HPD bits (DP-alt/TBT-alt) with the PCH HPD bit (legacy, only considered
 * on legacy ports).
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
983
984static bool
985xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
986{
987 struct drm_i915_private *i915 = tc_to_i915(tc);
988 enum port port = tc->dig_port->base.port;
989
990 assert_tc_cold_blocked(tc);
991
992 return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
993}
994
995static bool
996xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
997{
998 struct drm_i915_private *i915 = tc_to_i915(tc);
999
1000 if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1001 drm_dbg_kms(&i915->drm,
1002 "Port %s: timeout waiting for TCSS power to get %s\n",
1003 enabled ? "enabled" : "disabled",
1004 tc->port_name);
1005 return false;
1006 }
1007
1008 return true;
1009}
1010
1011static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1012{
1013 struct drm_i915_private *i915 = tc_to_i915(tc);
1014 enum port port = tc->dig_port->base.port;
1015 u32 val;
1016
1017 assert_tc_cold_blocked(tc);
1018
1019 val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
1020 if (enable)
1021 val |= XELPDP_TCSS_POWER_REQUEST;
1022 else
1023 val &= ~XELPDP_TCSS_POWER_REQUEST;
1024 intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
1025}
1026
/*
 * Request the TCSS power state to change to @enable and wait for it. On an
 * enable timeout (unexpected on legacy ports) the request is reverted;
 * returns false on any failure.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* Timeouts are unexpected on legacy ports; don't revert there. */
	if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* Nothing to revert if the request was to disable. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1053
1054static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1055{
1056 struct drm_i915_private *i915 = tc_to_i915(tc);
1057 enum port port = tc->dig_port->base.port;
1058 u32 val;
1059
1060 assert_tc_cold_blocked(tc);
1061
1062 val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
1063 if (take)
1064 val |= XELPDP_TC_PHY_OWNERSHIP;
1065 else
1066 val &= ~XELPDP_TC_PHY_OWNERSHIP;
1067 intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
1068}
1069
1070static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
1071{
1072 struct drm_i915_private *i915 = tc_to_i915(tc);
1073 enum port port = tc->dig_port->base.port;
1074
1075 assert_tc_cold_blocked(tc);
1076
1077 return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
1078}
1079
/*
 * Read out the PHY's current mode, blocking TC-cold for the readout and -
 * if connected - keeping it blocked via tc->lock_wakeref. Also sanity
 * check that TCSS power is on in owned modes.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	drm_WARN_ON(&i915->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	/* Drop the temporary readout reference; lock_wakeref persists. */
	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1098
/*
 * Connect the XELPDP PHY in the port's current mode: block TC-cold and,
 * for legacy/DP-alt modes, enable TCSS power and take PHY ownership. On
 * failure all acquired state is released via the goto ladder.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	/* In TBT-alt mode the PHY stays owned by the TBT subsystem. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1125
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Owned modes release ownership and TCSS power first. */
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1141
/* PHY ops for XELPDP (MTL) TC ports; reuses some TGL/ADLP handlers. */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1152
1153/*
1154 * Generic TC PHY handlers
1155 * -----------------------
1156 */
/* Dispatch to the platform hook for the TC-cold blocking power domain. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
1162
1163static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
1164{
1165 struct drm_i915_private *i915 = tc_to_i915(tc);
1166 u32 mask;
1167
1168 mask = tc->phy_ops->hpd_live_status(tc);
1169
1170 /* The sink can be connected only in a single mode. */
1171 drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
1172
1173 return mask;
1174}
1175
/* Dispatch to the platform hook for the PHY ready flag. */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}
1180
/* Dispatch to the platform hook for the PHY ownership flag. */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}
1185
/* Dispatch to the platform hook for the PHY HW state readout. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
1190
1191static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
1192 bool phy_is_ready, bool phy_is_owned)
1193{
1194 struct drm_i915_private *i915 = tc_to_i915(tc);
1195
1196 drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
1197
1198 return phy_is_ready && phy_is_owned;
1199}
1200
/*
 * Return whether the PHY state is consistent with the given PLL type: a
 * ready-and-owned PHY should be driven by the MG/TC PLL, otherwise the
 * default (TBT) PLL is expected.
 */
static bool tc_phy_is_connected(struct intel_tc_port *tc,
				enum icl_port_dpll_id port_pll_type)
{
	struct intel_encoder *encoder = &tc->dig_port->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	bool phy_is_ready = tc_phy_is_ready(tc);
	bool phy_is_owned = tc_phy_is_owned(tc);
	bool is_connected;

	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
	else
		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
		    tc->port_name,
		    str_yes_no(is_connected),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");

	return is_connected;
}
1225
1226static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
1227{
1228 struct drm_i915_private *i915 = tc_to_i915(tc);
1229
1230 if (wait_for(tc_phy_is_ready(tc), 500)) {
1231 drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
1232 tc->port_name);
1233
1234 return false;
1235 }
1236
1237 return true;
1238}
1239
1240static enum tc_port_mode
1241hpd_mask_to_tc_mode(u32 live_status_mask)
1242{
1243 if (live_status_mask)
1244 return fls(live_status_mask) - 1;
1245
1246 return TC_PORT_DISCONNECTED;
1247}
1248
1249static enum tc_port_mode
1250tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1251{
1252 u32 live_status_mask = tc_phy_hpd_live_status(tc);
1253
1254 return hpd_mask_to_tc_mode(live_status_mask);
1255}
1256
/*
 * Resolve the port mode when the PHY is ready and owned. A live legacy or
 * DP-alt HPD is taken at face value; otherwise fall back to the mode
 * implied by the port's legacy flag.
 */
static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
			       enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		return live_mode;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_TBT_ALT:
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_LEGACY;
		else
			return TC_PORT_DP_ALT;
	}
}
1276
/*
 * Resolve the port mode when the PHY is not owned by display. Without
 * ownership only TBT-alt or disconnected are valid resolutions; a live
 * legacy HPD without ownership resolves to disconnected.
 */
static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
				   enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
		return TC_PORT_DISCONNECTED;
	case TC_PORT_DP_ALT:
	case TC_PORT_TBT_ALT:
		return TC_PORT_TBT_ALT;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_DISCONNECTED;
		else
			return TC_PORT_TBT_ALT;
	}
}
1297
/*
 * Determine the PHY's current mode from the live HPD status and the PHY
 * ready/owned flags.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* An owned PHY shouldn't report a live TBT-alt HPD. */
		drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1335
1336static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1337{
1338 if (tc->legacy_port)
1339 return TC_PORT_LEGACY;
1340
1341 return TC_PORT_TBT_ALT;
1342}
1343
1344static enum tc_port_mode
1345hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1346{
1347 enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1348
1349 if (mode != TC_PORT_DISCONNECTED)
1350 return mode;
1351
1352 return default_tc_mode(tc);
1353}
1354
1355static enum tc_port_mode
1356tc_phy_get_target_mode(struct intel_tc_port *tc)
1357{
1358 u32 live_status_mask = tc_phy_hpd_live_status(tc);
1359
1360 return hpd_mask_to_target_mode(tc, live_status_mask);
1361}
1362
/*
 * Connect the PHY in the mode matching the live HPD status, falling back to
 * the port's default mode if that fails.
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	/* NOTE(review): presumably adjusts tc->legacy_port based on the live
	 * status - confirm against the helper's definition. */
	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		/* Retry in the default mode as a fallback. */
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	/* Connecting in the default mode is expected to always succeed. */
	drm_WARN_ON(&i915->drm, !connected);
}
1381
1382static void tc_phy_disconnect(struct intel_tc_port *tc)
1383{
1384 if (tc->mode != TC_PORT_DISCONNECTED) {
1385 tc->phy_ops->disconnect(tc);
1386 tc->mode = TC_PORT_DISCONNECTED;
1387 }
1388}
1389
/* Run the platform specific PHY init hook, under the TC port lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1396
/*
 * Reset the port's TypeC mode: disconnect the PHY and - unless
 * @force_disconnect is set - reconnect it in the mode matching the current
 * live HPD status.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	/* Settle any pending async power domain disabling first. */
	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/*
		 * If blocking TC-cold doesn't depend on the AUX power domain,
		 * that domain is expected to be disabled by now; warn if it
		 * is still enabled while the mode is being changed.
		 */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1423
1424static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
1425{
1426 return tc_phy_get_target_mode(tc) != tc->mode;
1427}
1428
1429static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1430 int required_lanes, bool force_disconnect)
1431{
1432 if (force_disconnect ||
1433 intel_tc_port_needs_reset(tc))
1434 intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1435}
1436
1437static void __intel_tc_port_get_link(struct intel_tc_port *tc)
1438{
1439 tc->link_refcount++;
1440}
1441
1442static void __intel_tc_port_put_link(struct intel_tc_port *tc)
1443{
1444 tc->link_refcount--;
1445}
1446
1447static bool tc_port_is_enabled(struct intel_tc_port *tc)
1448{
1449 struct drm_i915_private *i915 = tc_to_i915(tc);
1450 struct intel_digital_port *dig_port = tc->dig_port;
1451
1452 assert_tc_port_power_enabled(tc);
1453
1454 return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
1455 DDI_BUF_CTL_ENABLE;
1456}
1457
1458/**
1459 * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1460 * @dig_port: digital port
1461 *
1462 * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1463 * will be locked until intel_tc_port_sanitize_mode() is called.
1464 */
1465void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1466{
1467 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1468 struct intel_tc_port *tc = to_tc_port(dig_port);
1469 bool update_mode = false;
1470
1471 mutex_lock(&tc->lock);
1472
1473 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
1474 drm_WARN_ON(&i915->drm, tc->lock_wakeref);
1475 drm_WARN_ON(&i915->drm, tc->link_refcount);
1476
1477 tc_phy_get_hw_state(tc);
1478 /*
1479 * Save the initial mode for the state check in
1480 * intel_tc_port_sanitize_mode().
1481 */
1482 tc->init_mode = tc->mode;
1483
1484 /*
1485 * The PHY needs to be connected for AUX to work during HW readout and
1486 * MST topology resume, but the PHY mode can only be changed if the
1487 * port is disabled.
1488 *
1489 * An exception is the case where BIOS leaves the PHY incorrectly
1490 * disconnected on an enabled legacy port. Work around that by
1491 * connecting the PHY even though the port is enabled. This doesn't
1492 * cause a problem as the PHY ownership state is ignored by the
1493 * IOM/TCSS firmware (only display can own the PHY in that case).
1494 */
1495 if (!tc_port_is_enabled(tc)) {
1496 update_mode = true;
1497 } else if (tc->mode == TC_PORT_DISCONNECTED) {
1498 drm_WARN_ON(&i915->drm, !tc->legacy_port);
1499 drm_err(&i915->drm,
1500 "Port %s: PHY disconnected on enabled port, connecting it\n",
1501 tc->port_name);
1502 update_mode = true;
1503 }
1504
1505 if (update_mode)
1506 intel_tc_port_update_mode(tc, 1, false);
1507
1508 /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1509 __intel_tc_port_get_link(tc);
1510
1511 mutex_unlock(&tc->lock);
1512}
1513
/*
 * Return whether the port's encoder has any active links, logging an error
 * if links are active while the PHY is disconnected.
 */
static bool tc_port_has_active_links(struct intel_tc_port *tc,
				     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_links = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_links = 1;
	}

	/* An active link requires a connected PHY using a matching PLL. */
	if (active_links && !tc_phy_is_connected(tc, pll_type))
		drm_err(&i915->drm,
			"Port %s: PHY disconnected with %d active link(s)\n",
			tc->port_name, active_links);

	return active_links;
}
1537
1538/**
1539 * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1540 * @dig_port: digital port
1541 * @crtc_state: atomic state of CRTC connected to @dig_port
1542 *
1543 * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1544 * loading and system resume:
1545 * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1546 * the encoder is disabled.
1547 * If the encoder is disabled make sure the PHY is disconnected.
1548 * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1549 */
1550void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1551 const struct intel_crtc_state *crtc_state)
1552{
1553 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1554 struct intel_tc_port *tc = to_tc_port(dig_port);
1555
1556 mutex_lock(&tc->lock);
1557
1558 drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
1559 if (!tc_port_has_active_links(tc, crtc_state)) {
1560 /*
1561 * TBT-alt is the default mode in any case the PHY ownership is not
1562 * held (regardless of the sink's connected live state), so
1563 * we'll just switch to disconnected mode from it here without
1564 * a note.
1565 */
1566 if (tc->init_mode != TC_PORT_TBT_ALT &&
1567 tc->init_mode != TC_PORT_DISCONNECTED)
1568 drm_dbg_kms(&i915->drm,
1569 "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1570 tc->port_name,
1571 tc_port_mode_name(tc->init_mode));
1572 tc_phy_disconnect(tc);
1573 __intel_tc_port_put_link(tc);
1574 }
1575
1576 drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
1577 tc->port_name,
1578 tc_port_mode_name(tc->mode));
1579
1580 mutex_unlock(&tc->lock);
1581}
1582
1583/*
1584 * The type-C ports are different because even when they are connected, they may
1585 * not be available/usable by the graphics driver: see the comment on
1586 * icl_tc_phy_connect(). So in our driver instead of adding the additional
1587 * concept of "usable" and make everything check for "connected and usable" we
1588 * define a port as "connected" when it is not only connected, but also when it
1589 * is usable by the rest of the driver. That maintains the old assumption that
1590 * connected ports are usable, and avoids exposing to the users objects they
1591 * can't really use.
1592 */
1593bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
1594{
1595 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1596 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1597 struct intel_tc_port *tc = to_tc_port(dig_port);
1598 u32 mask = ~0;
1599
1600 drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
1601
1602 if (tc->mode != TC_PORT_DISCONNECTED)
1603 mask = BIT(tc->mode);
1604
1605 return tc_phy_hpd_live_status(tc) & mask;
1606}
1607
1608bool intel_tc_port_connected(struct intel_encoder *encoder)
1609{
1610 struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1611 struct intel_tc_port *tc = to_tc_port(dig_port);
1612 bool is_connected;
1613
1614 mutex_lock(&tc->lock);
1615 is_connected = intel_tc_port_connected_locked(encoder);
1616 mutex_unlock(&tc->lock);
1617
1618 return is_connected;
1619}
1620
1621static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
1622{
1623 bool ret;
1624
1625 mutex_lock(&tc->lock);
1626
1627 ret = tc->link_refcount &&
1628 tc->mode == TC_PORT_DP_ALT &&
1629 intel_tc_port_needs_reset(tc);
1630
1631 mutex_unlock(&tc->lock);
1632
1633 return ret;
1634}
1635
1636bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1637{
1638 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1639 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1640
1641 if (!intel_phy_is_tc(i915, phy))
1642 return false;
1643
1644 return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1645}
1646
/*
 * Add all CRTCs with an active pipe driven by @tc's DP encoder to @state,
 * marking their connectors changed, and commit the state to reset the link.
 *
 * Returns 0 on success or if no reset is needed (no active pipes, or the
 * need for a reset went away meanwhile), a negative error code otherwise;
 * -EDEADLK makes the caller's lock-retry loop back off and retry.
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	/* Nothing to reset without active pipes. */
	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force the connector state to be recomputed by the commit. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* Re-check: the reset may have become unnecessary meanwhile. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1684
/*
 * Reset the link of @tc's port via an atomic commit, using the modeset
 * lock-retry loop to handle -EDEADLK lock contention.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(&i915->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Flag the commit as driver internal, not a userspace request. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1707
/*
 * Delayed work resetting the link of a DP-alt mode port whose sink got
 * disconnected while the link was active. Scheduled by
 * intel_tc_port_link_reset().
 */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct drm_i915_private *i915 = tc_to_i915(tc);
	int ret;

	/* The reset may have become unnecessary since it was scheduled. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&i915->drm.mode_config.mutex);

	drm_dbg_kms(&i915->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(&i915->drm, ret);

	mutex_unlock(&i915->drm.mode_config.mutex);
}
1728
1729bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
1730{
1731 if (!intel_tc_port_link_needs_reset(dig_port))
1732 return false;
1733
1734 queue_delayed_work(system_unbound_wq,
1735 &to_tc_port(dig_port)->link_reset_work,
1736 msecs_to_jiffies(2000));
1737
1738 return true;
1739}
1740
1741void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1742{
1743 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1744 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1745 struct intel_tc_port *tc = to_tc_port(dig_port);
1746
1747 if (!intel_phy_is_tc(i915, phy))
1748 return;
1749
1750 cancel_delayed_work(&tc->link_reset_work);
1751}
1752
/*
 * Lock the TC port, updating its mode - and so connecting the PHY with
 * @required_lanes - unless an active link pins the current mode.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	mutex_lock(&tc->lock);

	/* The PHY stays connected while locked; no need for the delayed disconnect. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	/* With a link reference held the mode is pinned, don't change it. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	/* A locked port must be connected; in any mode but TBT-alt display owns the PHY. */
	drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(tc));
}
1770
/* Lock the TC port, requiring only a single lane (enough for AUX). */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(to_tc_port(dig_port), 1);
}
1775
1776/*
1777 * Disconnect the given digital port from its TypeC PHY (handing back the
1778 * control of the PHY to the TypeC subsystem). This will happen in a delayed
1779 * manner after each aux transactions and modeset disables.
1780 */
1781static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1782{
1783 struct intel_tc_port *tc =
1784 container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1785
1786 mutex_lock(&tc->lock);
1787
1788 if (!tc->link_refcount)
1789 intel_tc_port_update_mode(tc, 1, true);
1790
1791 mutex_unlock(&tc->lock);
1792}
1793
1794/**
1795 * intel_tc_port_flush_work: flush the work disconnecting the PHY
1796 * @dig_port: digital port
1797 *
1798 * Flush the delayed work disconnecting an idle PHY.
1799 */
1800static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1801{
1802 flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1803}
1804
1805void intel_tc_port_suspend(struct intel_digital_port *dig_port)
1806{
1807 struct intel_tc_port *tc = to_tc_port(dig_port);
1808
1809 cancel_delayed_work_sync(&tc->link_reset_work);
1810 intel_tc_port_flush_work(dig_port);
1811}
1812
/*
 * Unlock the TC port, scheduling the delayed disconnect of an idle PHY
 * (no link reference and a connected PHY).
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1823
1824bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1825{
1826 struct intel_tc_port *tc = to_tc_port(dig_port);
1827
1828 return mutex_is_locked(&tc->lock) ||
1829 tc->link_refcount;
1830}
1831
/*
 * Take a link reference on @dig_port, connecting its PHY with
 * @required_lanes if needed and pinning the resulting mode.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1841
/*
 * Drop a link reference taken by intel_tc_port_get_link(), disconnecting
 * an idle PHY synchronously.
 */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1858
/*
 * Allocate and initialize the TypeC port state of @dig_port, selecting the
 * display-version specific PHY hooks and reading out the initial mode.
 * @is_legacy flags a legacy (non-DP-alt/TBT) TypeC port.
 *
 * Returns 0 on success, a negative error code on failure.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* PHY hooks ordered from newest to oldest display version. */
	if (DISPLAY_VER(i915) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		kfree(tc);
		return -ENOMEM;
	}

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
1906
1907void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1908{
1909 intel_tc_port_suspend(dig_port);
1910
1911 kfree(dig_port->tc->port_name);
1912 kfree(dig_port->tc);
1913 dig_port->tc = NULL;
1914}