1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include "i915_drv.h"
7#include "i915_reg.h"
8#include "intel_display.h"
9#include "intel_display_power_map.h"
10#include "intel_display_types.h"
11#include "intel_dkl_phy_regs.h"
12#include "intel_dp_mst.h"
13#include "intel_mg_phy_regs.h"
14#include "intel_tc.h"
15
/* Return a human readable name for @mode, for use in debug messages. */
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_DISCONNECTED] = "disconnected",
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	/* Out-of-range modes warn and fall back to "disconnected". */
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_DISCONNECTED;

	return names[mode];
}
30
/*
 * Return true if @dig_port is a TypeC PHY and its current TypeC mode
 * equals @mode. Always false for non-TypeC PHYs.
 */
static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
				  enum tc_port_mode mode)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

	return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
}
39
/* Return true if @dig_port is a TypeC port in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
44
/* Return true if @dig_port is a TypeC port in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
49
/* Return true if @dig_port is a TypeC port in legacy (fixed/native) mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
54
/*
 * Return true if blocking the TC-cold power state requires holding the
 * port's AUX power well: ICL (display ver 11) legacy ports and all
 * ADL-P ports.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
	       IS_ALDERLAKE_P(i915);
}
62
/*
 * Return the power domain that must be held to block TC-cold while the
 * port is (or will be) in @mode. TBT-alt mode and platforms not needing
 * the AUX power well use the dedicated TC_COLD_OFF domain; otherwise the
 * port's legacy AUX domain is used.
 */
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
		return POWER_DOMAIN_TC_COLD_OFF;

	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
73
/*
 * Block TC-cold assuming the port is in @mode, returning the acquired
 * wakeref and storing the corresponding power domain in @domain so the
 * caller can release it via tc_cold_unblock().
 */
static intel_wakeref_t
tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
		      enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	*domain = tc_cold_get_power_domain(dig_port, mode);

	return intel_display_power_get(i915, *domain);
}
84
/* Block TC-cold for the port's current mode; see tc_cold_block_in_mode(). */
static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
{
	return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
}
90
/*
 * Release a wakeref acquired by tc_cold_block()/tc_cold_block_in_mode(),
 * re-allowing the TC-cold power state.
 */
static void
tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
		intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	/*
	 * wakeref == -1, means some error happened saving save_depot_stack but
	 * power should still be put down and 0 is a invalid save_depot_stack
	 * id so can be used to skip it for non TC legacy ports.
	 */
	if (wakeref == 0)
		return;

	intel_display_power_put(i915, domain, wakeref);
}
107
/*
 * WARN if the power domain blocking TC-cold for the port's current mode
 * is not enabled; callers rely on it being held around FIA/PHY accesses.
 */
static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool enabled;

	enabled = intel_display_power_is_enabled(i915,
						 tc_cold_get_power_domain(dig_port,
									  dig_port->tc_mode));
	drm_WARN_ON(&i915->drm, !enabled);
}
119
/*
 * Read the DP lane assignment mask for @dig_port from the FIA's
 * DFLEXDPSP register, shifted down to the port's own field.
 * An all-ones readback indicates the PHY is in TC-cold (warned on).
 */
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
135
/*
 * Read the TypeC pin assignment field for @dig_port from the FIA's
 * DFLEXPA1 register. An all-ones readback indicates TC-cold (warned on).
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 pin_mask;

	pin_mask = intel_uncore_read(uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}
151
/*
 * Return the maximum lane count usable on @dig_port. Only DP-alt mode is
 * limited by the FIA lane assignment; all other modes can use 4 lanes.
 * Unexpected lane-mask patterns are warned on and treated as 1 lane.
 */
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(dig_port);

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
183
/*
 * Program the FIA's DFLEXDPMLE1 register with the set of main lanes the
 * display will use on @dig_port. Lane reversal is only valid in legacy
 * mode (warned on otherwise) and selects the mirrored lane set.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(dig_port);

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}
222
/*
 * Sanity check the VBT-derived legacy port flag against the observed
 * live (hotplug) status in @live_status_mask; if they disagree, trust
 * the live status and flip the flag.
 */
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	/* Nothing to do if all live modes are valid for the current flag. */
	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}
245
/*
 * ICL/TGL: return a bitmask of TC_PORT_* modes with a live sink, read
 * from the FIA DFLEXDPSP register (TBT/DP-alt) and SDEISR (legacy HPD).
 * An all-ones FIA readback means the PHY is in TC-cold: report nothing
 * connected. At most one mode can be live (warned on otherwise).
 */
static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}
278
/*
 * ADL-P: return a bitmask of TC_PORT_* modes with a live sink, read
 * from the IOM TCSS_DDI_STATUS register (TBT/DP-alt) and SDEISR
 * (legacy HPD). At most one mode can be live (warned on otherwise).
 */
static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	struct intel_uncore *uncore = &i915->uncore;
	u32 val, mask = 0;

	/*
	 * On ADL-P HW/FW will wake from TCCOLD to complete the read access of
	 * registers in IOM. Note that this doesn't apply to PHY and FIA
	 * registers.
	 */
	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
		mask |= BIT(TC_PORT_DP_ALT);
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
		mask |= BIT(TC_PORT_TBT_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}
307
308static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
309{
310 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
311
312 if (IS_ALDERLAKE_P(i915))
313 return adl_tc_port_live_status_mask(dig_port);
314
315 return icl_tc_port_live_status_mask(dig_port);
316}
317
/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
 * is connected and it's ready to switch the ownership to display. The flag
 * will be left cleared when a TBT-alt sink is connected, where the PHY is
 * owned by the TBT subsystem and so switching the ownership to display is not
 * required.
 */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
	/* An all-ones readback means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
343
/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when it's ready to switch
 * the ownership to display, regardless of what sink is connected (TBT-alt,
 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
 * subsystem and so switching the ownership to display is not required.
 */
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	/* An all-ones readback means the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
368
369static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
370{
371 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
372
373 if (IS_ALDERLAKE_P(i915))
374 return adl_tc_phy_status_complete(dig_port);
375
376 return icl_tc_phy_status_complete(dig_port);
377}
378
/*
 * ICL/TGL: take (or release, when @take is false) the PHY ownership via
 * the FIA DFLEXDPCSSS "not safe" bit. Returns false when the PHY is in
 * TC-cold (all-ones readback) and the ownership can't be changed.
 */
static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    dig_port->tc_port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	return true;
}
405
/*
 * ADL-P: take (or release, when @take is false) the PHY ownership via
 * the DDI_BUF_CTL ownership bit. Always succeeds.
 */
static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum port port = dig_port->base.port;

	intel_uncore_rmw(uncore, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
			 take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
418
419static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
420{
421 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
422
423 if (IS_ALDERLAKE_P(i915))
424 return adl_tc_phy_take_ownership(dig_port, take);
425
426 return icl_tc_phy_take_ownership(dig_port, take);
427}
428
/*
 * ICL/TGL: return whether display owns the PHY, from the FIA
 * DFLEXDPCSSS "not safe" bit. An all-ones readback means TC-cold;
 * ownership is then assumed (returns true), matching safe-mode handling.
 */
static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume safe mode\n",
			    dig_port->tc_port_name);
		return true;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
}
446
/* ADL-P: return whether display owns the PHY, from DDI_BUF_CTL. */
static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum port port = dig_port->base.port;
	u32 val;

	val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
457
458static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
459{
460 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
461
462 if (IS_ALDERLAKE_P(i915))
463 return adl_tc_phy_is_owned(dig_port);
464
465 return icl_tc_phy_is_owned(dig_port);
466}
467
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 *
 * On any failure to take PHY ownership (or when nothing requiring it is
 * connected) the port falls back to TBT-alt mode.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask;
	int max_lanes;

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	/* PHY ownership is only needed for DP-alt/legacy sinks. */
	live_status_mask = tc_port_live_status_mask(dig_port);
	if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) &&
	    !dig_port->tc_legacy_port) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
			    dig_port->tc_port_name, live_status_mask);
		goto out_set_tbt_alt_mode;
	}

	if (!tc_phy_take_ownership(dig_port, true) &&
	    !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		/* Legacy ports always have all 4 lanes available. */
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    dig_port->tc_port_name);
		goto out_release_phy;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    dig_port->tc_port_name,
			    max_lanes, required_lanes);
		goto out_release_phy;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_release_phy:
	tc_phy_take_ownership(dig_port, false);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}
539
/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow. Ownership is released only for modes that hold it (DP-alt/legacy),
 * then the port is marked disconnected.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		tc_phy_take_ownership(dig_port, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		dig_port->tc_mode = TC_PORT_DISCONNECTED;
		fallthrough;
	case TC_PORT_DISCONNECTED:
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}
560
/*
 * Return whether the PHY's HW state is consistent with the port's current
 * TypeC mode: TBT-alt needs no ownership, DP-alt/legacy require both the
 * status-complete flag and display ownership.
 */
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
			    dig_port->tc_port_name);
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	/* On ADL-P the PHY complete flag is set in TBT mode as well. */
	if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
		return true;

	if (!tc_phy_is_owned(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
			    dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}
585
/*
 * Read out the TypeC mode currently reflected by the HW state: TBT-alt if
 * display doesn't own the PHY (or the PHY isn't ready, which is warned on);
 * otherwise legacy/DP-alt per the VBT flag, overridden by the live status
 * when one is present (a live TBT-alt status with ownership held is warned
 * on and ignored).
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	enum tc_port_mode mode;

	if (!tc_phy_is_owned(dig_port) ||
	    drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		/* fls() maps the highest set BIT(mode) back to a mode. */
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}
607
/*
 * Return the TypeC mode the port should be switched to, based on the live
 * status; TBT-alt when nothing is connected.
 */
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	/* fls() maps the highest set BIT(mode) back to a mode. */
	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return TC_PORT_TBT_ALT;
}
618
/*
 * Disconnect the PHY and - unless @force_disconnect - reconnect it,
 * re-detecting the port's mode for @required_lanes. Any pending power
 * domain work is flushed first and, where the AUX power well isn't
 * needed to block TC-cold, it's asserted to be off.
 */
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	icl_tc_phy_disconnect(dig_port);
	if (!force_disconnect)
		icl_tc_phy_connect(dig_port, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(dig_port->tc_mode));
}
644
/* True if the port's current mode differs from its live-status target. */
static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}
649
/*
 * Update the port's TypeC mode if it no longer matches the live status
 * (or unconditionally disconnect when @force_disconnect). TC-cold is
 * blocked around the status check and the reset itself, and the
 * long-term (lock) wakeref is swapped to match the new mode.
 */
static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
				      int required_lanes, bool force_disconnect)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wref;
	bool needs_reset = force_disconnect;

	if (!needs_reset) {
		/* Get power domain required to check the hotplug live status. */
		wref = tc_cold_block(dig_port, &domain);
		needs_reset = intel_tc_port_needs_reset(dig_port);
		tc_cold_unblock(dig_port, domain, wref);
	}

	if (!needs_reset)
		return;

	/* Get power domain required for resetting the mode. */
	wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);

	intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);

	/* Get power domain matching the new mode after reset. */
	tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
			fetch_and_zero(&dig_port->tc_lock_wakeref));
	if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
							  &dig_port->tc_lock_power_domain);

	tc_cold_unblock(dig_port, domain, wref);
}
681
/* Initialize the link refcount locking the port's TypeC mode. */
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	dig_port->tc_link_refcount = refcount;
}
688
/**
 * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
 * @dig_port: digital port
 *
 * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
 * will be locked until intel_tc_port_sanitize_mode() is called.
 */
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	mutex_lock(&dig_port->tc_lock);

	/* This must be the first readout of the port's TypeC state. */
	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);

	tc_cold_wref = tc_cold_block(dig_port, &domain);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	/* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
	intel_tc_port_link_init_refcount(dig_port, 1);
	dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);

	tc_cold_unblock(dig_port, domain, tc_cold_wref);

	drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}
723
/**
 * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
 * @dig_port: digital port
 *
 * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
 * loading and system resume:
 * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
 * the encoder is disabled.
 * If the encoder is disabled make sure the PHY is disconnected.
 */
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_encoder *encoder = &dig_port->base;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);

	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	/* The refcount must still hold the init-time lock from init_mode. */
	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
	intel_tc_port_link_init_refcount(dig_port, active_links);

	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY disconnected with %d active link(s)\n",
				    dig_port->tc_port_name, active_links);
	} else {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership is not
		 * held (regardless of the sink's connected live state), so
		 * we'll just switch to disconnected mode from it here without
		 * a note.
		 */
		if (dig_port->tc_mode != TC_PORT_TBT_ALT)
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    dig_port->tc_port_name,
				    tc_port_mode_name(dig_port->tc_mode));
		icl_tc_phy_disconnect(dig_port);

		tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
				fetch_and_zero(&dig_port->tc_lock_wakeref));
	}

	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}
779
/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected;

	intel_tc_port_lock(dig_port);

	/* Connected only if a sink is live in the port's current mode. */
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);

	intel_tc_port_unlock(dig_port);

	return is_connected;
}
804
/*
 * Lock the port's TypeC state, cancelling any pending delayed PHY
 * disconnect and - if no link holds the mode locked - updating the mode
 * to match the live status for @required_lanes. On return the mode is
 * valid and, outside TBT-alt mode, the PHY ownership is held (warned on
 * otherwise).
 */
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	mutex_lock(&dig_port->tc_lock);

	cancel_delayed_work(&dig_port->tc_disconnect_phy_work);

	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, required_lanes,
					  false);

	drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(dig_port));
}
822
/* Lock the port's TypeC state, requiring only a single lane. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}
827
/**
 * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
 * @dig_port: digital port
 *
 * Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). This will happen in a delayed
 * manner after each aux transactions and modeset disables.
 */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_digital_port *dig_port =
		container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);

	mutex_lock(&dig_port->tc_lock);

	/* Only disconnect while no link holds the mode locked. */
	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, 1, true);

	mutex_unlock(&dig_port->tc_lock);
}
848
/**
 * intel_tc_port_flush_work: flush the work disconnecting the PHY
 * @dig_port: digital port
 *
 * Flush the delayed work disconnecting an idle PHY.
 */
void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&dig_port->tc_disconnect_phy_work);
}
859
/*
 * Unlock the port's TypeC state; if the PHY is idle (no link refs, not
 * disconnected), schedule the delayed work disconnecting it after 1s.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&dig_port->tc_lock);
}
868
/* True if the port's TypeC state is locked or a link reference is held. */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}
874
/*
 * Take a link reference on the port, locking its TypeC mode (updated
 * first for @required_lanes) until intel_tc_port_put_link().
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}
882
/* Drop a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	intel_tc_port_lock(dig_port);
	--dig_port->tc_link_refcount;
	intel_tc_port_unlock(dig_port);

	/*
	 * Disconnecting the PHY after the PHY's PLL gets disabled may
	 * hang the system on ADL-P, so disconnect the PHY here synchronously.
	 * TODO: remove this once the root cause of the ordering requirement
	 * is found/fixed.
	 */
	intel_tc_port_flush_work(dig_port);
}
897
/*
 * Return whether the platform uses modular FIA instances, read from the
 * first FIA's DFLEXDPSP register with TC-cold blocked. An all-ones
 * readback is unexpected here and warned on.
 */
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;
	u32 val;

	if (!INTEL_INFO(i915)->display.has_modular_fia)
		return false;

	mutex_lock(&dig_port->tc_lock);
	wakeref = tc_cold_block(dig_port, &domain);
	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
	tc_cold_unblock(dig_port, domain, wakeref);
	mutex_unlock(&dig_port->tc_lock);

	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	return val & MODULAR_FIA_MASK;
}
918
/*
 * Compute which FIA instance and which index within it serve @dig_port,
 * storing the result in tc_phy_fia / tc_phy_fia_idx.
 */
static void
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (tc_has_modular_fia(i915, dig_port)) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}
937
/*
 * One-time initialization of @dig_port's TypeC state: name, lock,
 * delayed disconnect work, legacy flag (@is_legacy, from VBT), FIA
 * parameters, and the initial mode readout via intel_tc_port_init_mode().
 */
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_mode = TC_PORT_DISCONNECTED;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);

	intel_tc_port_init_mode(dig_port);
}
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#include "i915_drv.h"
7#include "i915_reg.h"
8#include "intel_atomic.h"
9#include "intel_cx0_phy_regs.h"
10#include "intel_ddi.h"
11#include "intel_de.h"
12#include "intel_display.h"
13#include "intel_display_driver.h"
14#include "intel_display_power_map.h"
15#include "intel_display_types.h"
16#include "intel_dkl_phy_regs.h"
17#include "intel_dp.h"
18#include "intel_dp_mst.h"
19#include "intel_mg_phy_regs.h"
20#include "intel_modeset_lock.h"
21#include "intel_tc.h"
22
23#define DP_PIN_ASSIGNMENT_C 0x3
24#define DP_PIN_ASSIGNMENT_D 0x4
25#define DP_PIN_ASSIGNMENT_E 0x5
26
/* The TypeC mode a port can be in; see tc_port_mode_name() for names. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,
	TC_PORT_TBT_ALT,
	TC_PORT_DP_ALT,
	TC_PORT_LEGACY,
};
33
34struct intel_tc_port;
35
/* Platform specific hooks implementing the TypeC PHY connect/disconnect flow. */
struct intel_tc_phy_ops {
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	bool (*is_ready)(struct intel_tc_port *tc);
	bool (*is_owned)(struct intel_tc_port *tc);
	void (*get_hw_state)(struct intel_tc_port *tc);
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	void (*disconnect)(struct intel_tc_port *tc);
	void (*init)(struct intel_tc_port *tc);
};
46
/* Per-port TypeC state, owned by the corresponding digital port. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;

	/* Platform-specific PHY access handlers. */
	const struct intel_tc_phy_ops *phy_ops;

	struct mutex lock; /* protects the TypeC port mode */
	/* Wakeref blocking TC-cold while the port is connected. */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* Domain lock_wakeref was taken for, to cross-check on release. */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	/* Number of active links keeping the PHY connected. */
	int link_refcount;
	/* Legacy (non-DP-alt/TBT) port; initial value comes from VBT, may be fixed up from live status. */
	bool legacy_port:1;
	const char *port_name;
	/* Current mode; TC_PORT_DISCONNECTED when no sink is connected. */
	enum tc_port_mode mode;
	enum tc_port_mode init_mode;
	/* FIA instance and the port's index within it. */
	enum phy_fia phy_fia;
	u8 phy_fia_idx;
};
67
68static enum intel_display_power_domain
69tc_phy_cold_off_domain(struct intel_tc_port *);
70static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
71static bool tc_phy_is_ready(struct intel_tc_port *tc);
72static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
73static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
74
75static const char *tc_port_mode_name(enum tc_port_mode mode)
76{
77 static const char * const names[] = {
78 [TC_PORT_DISCONNECTED] = "disconnected",
79 [TC_PORT_TBT_ALT] = "tbt-alt",
80 [TC_PORT_DP_ALT] = "dp-alt",
81 [TC_PORT_LEGACY] = "legacy",
82 };
83
84 if (WARN_ON(mode >= ARRAY_SIZE(names)))
85 mode = TC_PORT_DISCONNECTED;
86
87 return names[mode];
88}
89
/* TypeC state of @dig_port. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
94
/* Device instance owning the TypeC port @tc. */
static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
{
	return to_i915(tc->dig_port->base.base.dev);
}
99
100static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
101 enum tc_port_mode mode)
102{
103 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
104 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
105 struct intel_tc_port *tc = to_tc_port(dig_port);
106
107 return intel_phy_is_tc(i915, phy) && tc->mode == mode;
108}
109
/* Whether @dig_port is a TypeC port currently in TBT-alt mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
114
/* Whether @dig_port is a TypeC port currently in DP-alt mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
119
/* Whether @dig_port is a TypeC port currently in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
124
125/*
126 * The display power domains used for TC ports depending on the
127 * platform and TC mode (legacy, DP-alt, TBT):
128 *
129 * POWER_DOMAIN_DISPLAY_CORE:
130 * --------------------------
131 * ADLP/all modes:
132 * - TCSS/IOM access for PHY ready state.
133 * ADLP+/all modes:
134 * - DE/north-,south-HPD ISR access for HPD live state.
135 *
136 * POWER_DOMAIN_PORT_DDI_LANES_<port>:
137 * -----------------------------------
138 * ICL+/all modes:
139 * - DE/DDI_BUF access for port enabled state.
140 * ADLP/all modes:
141 * - DE/DDI_BUF access for PHY owned state.
142 *
143 * POWER_DOMAIN_AUX_USBC<TC port index>:
144 * -------------------------------------
145 * ICL/legacy mode:
146 * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
147 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
148 * main lanes.
149 * ADLP/legacy, DP-alt modes:
150 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
151 * main lanes.
152 *
153 * POWER_DOMAIN_TC_COLD_OFF:
154 * -------------------------
155 * ICL/DP-alt, TBT mode:
156 * - TCSS/TBT: block TC-cold power state for using the (direct or
157 * TBT DP-IN) AUX and main lanes.
158 *
159 * TGL/all modes:
160 * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
161 * - TCSS/PHY: block TC-cold power state for using the (direct or
162 * TBT DP-IN) AUX and main lanes.
163 *
164 * ADLP/TBT mode:
165 * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
166 * AUX and main lanes.
167 *
168 * XELPDP+/all modes:
169 * - TCSS/IOM,FIA access for PHY ready, owned state
170 * - TCSS/PHY: block TC-cold power state for using the (direct or
171 * TBT DP-IN) AUX and main lanes.
172 */
173bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
174{
175 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
176 struct intel_tc_port *tc = to_tc_port(dig_port);
177
178 return tc_phy_cold_off_domain(tc) ==
179 intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
180}
181
/*
 * Block the TC-cold power state by getting the PHY's cold-off power domain.
 * The domain acquired is returned in @domain for the matching unblock call.
 */
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(i915, *domain);
}
191
/*
 * Block the TC-cold power state, tracking (in debug builds) the power domain
 * used so that tc_cold_unblock() can cross-check it on release.
 */
static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}
204
/* Release the TC-cold blocking reference taken by __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	intel_display_power_put(i915, domain, wakeref);
}
213
/*
 * Release the TC-cold blocking reference taken by tc_cold_block(), warning
 * (in debug builds) if the cold-off domain changed since it was acquired.
 */
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}
224
/* Warn if the display core power domain is not enabled. */
static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
}
233
234static void
235assert_tc_cold_blocked(struct intel_tc_port *tc)
236{
237 struct drm_i915_private *i915 = tc_to_i915(tc);
238 bool enabled;
239
240 enabled = intel_display_power_is_enabled(i915,
241 tc_phy_cold_off_domain(tc));
242 drm_WARN_ON(&i915->drm, !enabled);
243}
244
/* DDI-lanes power domain for @tc's port. */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);

	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
253
/* Warn if the port's DDI-lanes power domain is not enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
}
262
/* Read the DP lane-assignment mask for @dig_port from the FIA. */
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 lane_mask;

	lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));

	/* All-ones readout indicates the PHY is in TC-cold. */
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
277
/* Read the DP pin-assignment mask for @dig_port from the FIA. */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 pin_mask;

	pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));

	/* All-ones readout indicates the PHY is in TC-cold. */
	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
292
/*
 * Max DP lane count for @dig_port on LNL+ (display ver 20+), derived from
 * the pin assignment in TCSS_DDI_STATUS.
 */
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	intel_wakeref_t wakeref;
	u32 val, pin_assignment;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));

	pin_assignment =
		REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);

	switch (pin_assignment) {
	default:
		MISSING_CASE(pin_assignment);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
317
/*
 * Max DP lane count for @dig_port on MTL (display ver 14+), derived from
 * the FIA pin assignment mask.
 */
static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 pin_mask;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);

	switch (pin_mask) {
	default:
		MISSING_CASE(pin_mask);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
338
/*
 * Max DP lane count for @dig_port on pre-MTL platforms, derived from the
 * FIA lane assignment mask (one bit per assigned lane).
 */
static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
364
/*
 * Max DP lane count usable on @dig_port. Non-TypeC ports and any mode other
 * than DP-alt can always use all 4 lanes; in DP-alt mode the count depends
 * on the sink's pin/lane assignment, read via the platform-specific helper.
 */
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

	if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(tc);

	if (DISPLAY_VER(i915) >= 20)
		return lnl_tc_port_get_max_lane_count(dig_port);

	if (DISPLAY_VER(i915) >= 14)
		return mtl_tc_port_get_max_lane_count(dig_port);

	return intel_tc_port_get_max_lane_count(dig_port);
}
384
/*
 * Program the FIA with the number of main lanes (@required_lanes) the port
 * will use, taking lane reversal into account. Lane reversal is only
 * expected in legacy mode (warned on otherwise).
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		/* Reversed ports use the top lane(s) instead of the bottom one(s). */
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
421
/*
 * Fix up the VBT-derived legacy-port flag if it contradicts the live HPD
 * status in @live_status_mask: a legacy port should only report a legacy
 * HPD and a non-legacy port only DP-alt/TBT-alt HPDs.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 valid_hpd_mask;

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Only a single, unambiguous live mode can prove the flag wrong. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
449
/* Compute which FIA instance and index within it serves @tc's port. */
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (modular_fia) {
		tc->phy_fia = tc_port / 2;
		tc->phy_fia_idx = tc_port % 2;
	} else {
		/* Single FIA serving all ports. */
		tc->phy_fia = FIA1;
		tc->phy_fia_idx = tc_port;
	}
}
468
469/*
470 * ICL TC PHY handlers
471 * -------------------
472 */
/*
 * ICL: legacy ports block TC-cold via the port's AUX power domain,
 * DP-alt/TBT ports via the dedicated TC_COLD_OFF domain.
 */
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->legacy_port)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
484
/*
 * ICL: live HPD status mask, combining the FIA's TBT/TC live-state bits
 * with the PCH ISR bit for legacy HPD.
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(i915, SDEISR);
	}

	/* All-ones readout indicates the PHY is in TC-cold. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
517
518/*
519 * Return the PHY status complete flag indicating that display can acquire the
520 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
521 * is connected and it's ready to switch the ownership to display. The flag
522 * will be left cleared when a TBT-alt sink is connected, where the PHY is
523 * owned by the TBT subsystem and so switching the ownership to display is not
524 * required.
525 */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* All-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
543
/*
 * ICL: take (@take=true) or release (@take=false) PHY ownership via the FIA
 * safe-mode bit. Returns false if the PHY is in TC-cold and the register
 * can't be accessed.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* All-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
569
/* ICL: whether display owns the PHY, per the FIA safe-mode bit. */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* All-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
587
/*
 * ICL: read out the current PHY mode, keeping TC-cold blocked (via
 * tc->lock_wakeref) if a sink turns out to be connected.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
601
602/*
603 * This function implements the first part of the Connect Flow described by our
604 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
605 * lanes, EDID, etc) is done as needed in the typical places.
606 *
607 * Unlike the other ports, type-C ports are not available to use as soon as we
608 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
609 * display, USB, etc. As a result, handshaking through FIA is required around
610 * connect and disconnect to cleanly transfer ownership with the controller and
611 * set the type-C power state.
612 */
/*
 * Verify that a port in legacy or DP-alt mode can provide @required_lanes.
 * Legacy ports always have all 4 lanes; DP-alt ports are re-checked for a
 * sudden disconnect and for a sufficient max lane count.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
648
649static bool icl_tc_phy_connect(struct intel_tc_port *tc,
650 int required_lanes)
651{
652 struct drm_i915_private *i915 = tc_to_i915(tc);
653
654 tc->lock_wakeref = tc_cold_block(tc);
655
656 if (tc->mode == TC_PORT_TBT_ALT)
657 return true;
658
659 if ((!tc_phy_is_ready(tc) ||
660 !icl_tc_phy_take_ownership(tc, true)) &&
661 !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
662 drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
663 tc->port_name,
664 str_yes_no(tc_phy_is_ready(tc)));
665 goto out_unblock_tc_cold;
666 }
667
668
669 if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
670 goto out_release_phy;
671
672 return true;
673
674out_release_phy:
675 icl_tc_phy_take_ownership(tc, false);
676out_unblock_tc_cold:
677 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
678
679 return false;
680}
681
682/*
683 * See the comment at the connect function. This implements the Disconnect
684 * Flow.
685 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Release PHY ownership, then fall through to unblock TC-cold. */
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
700
/* ICL: single (non-modular) FIA. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
705
/* ICL TypeC PHY handlers. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
716
717/*
718 * TGL TC PHY handlers
719 * -------------------
720 */
/* TGL: all modes block TC-cold via the dedicated TC_COLD_OFF domain. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
726
/* TGL: detect modular vs. single FIA from HW and load the FIA parameters. */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

	/* All-ones readout indicates the PHY is in TC-cold. */
	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
740
/* TGL TypeC PHY handlers: ICL handlers with TGL cold-off domain and init. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
751
752/*
753 * ADLP TC PHY handlers
754 * --------------------
755 */
/*
 * ADLP: legacy and DP-alt modes block TC-cold via the port's AUX power
 * domain, TBT mode via the dedicated TC_COLD_OFF domain.
 */
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->mode != TC_PORT_TBT_ALT)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}
767
/*
 * ADLP: live HPD status mask from the CPU DE HPD ISR (DP-alt/TBT) and the
 * PCH ISR (legacy).
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
795
796/*
797 * Return the PHY status complete flag indicating that display can acquire the
798 * PHY ownership. The IOM firmware sets this flag when it's ready to switch
799 * the ownership to display, regardless of what sink is connected (TBT-alt,
800 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
801 * subsystem and so switching the ownership to display is not required.
802 */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
	u32 val;

	/* TCSS_DDI_STATUS only requires the display core power domain. */
	assert_display_core_power_enabled(tc);

	val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
	/* All-ones readout indicates the PHY is in TC-cold. */
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
821
/*
 * ADLP: take/release PHY ownership via the DDI_BUF_CTL ownership bit.
 * Always succeeds (returns true) since the register is always accessible
 * with the port power enabled.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
835
836static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
837{
838 struct drm_i915_private *i915 = tc_to_i915(tc);
839 enum port port = tc->dig_port->base.port;
840 u32 val;
841
842 assert_tc_port_power_enabled(tc);
843
844 val = intel_de_read(i915, DDI_BUF_CTL(port));
845 return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
846}
847
/*
 * ADLP: read out the current PHY mode under the port's DDI-lanes power
 * domain, keeping TC-cold blocked (via tc->lock_wakeref) if connected.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
863
/*
 * ADLP: connect the PHY. TBT mode only needs TC-cold blocked; otherwise
 * take ownership and check readiness under the port power domain, then
 * block TC-cold and verify the mode/lane requirements, unwinding each
 * step on failure.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	/* A legacy port failing here is unexpected: warn but continue connecting. */
	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return false;
}
910
/*
 * ADLP: disconnect the PHY - unblock TC-cold and release ownership (the
 * latter only in legacy/DP-alt mode), all under the port power domain.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}
935
/* ADLP: modular FIA. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
940
/* ADLP TypeC PHY handlers. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
951
952/*
953 * XELPDP TC PHY handlers
954 * ----------------------
955 */
/*
 * XELPDP: live HPD status mask from the PICA ISR (DP-alt/TBT) and the PCH
 * ISR (legacy; only considered for legacy ports).
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
983
/* XELPDP: whether TCSS power is enabled, per PORT_BUF_CTL1. */
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_cold_blocked(tc);

	return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
}
994
995static bool
996xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
997{
998 struct drm_i915_private *i915 = tc_to_i915(tc);
999
1000 if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1001 drm_dbg_kms(&i915->drm,
1002 "Port %s: timeout waiting for TCSS power to get %s\n",
1003 enabled ? "enabled" : "disabled",
1004 tc->port_name);
1005 return false;
1006 }
1007
1008 return true;
1009}
1010
1011static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
1012{
1013 struct drm_i915_private *i915 = tc_to_i915(tc);
1014 enum port port = tc->dig_port->base.port;
1015 u32 val;
1016
1017 assert_tc_cold_blocked(tc);
1018
1019 val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
1020 if (enable)
1021 val |= XELPDP_TCSS_POWER_REQUEST;
1022 else
1023 val &= ~XELPDP_TCSS_POWER_REQUEST;
1024 intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
1025}
1026
/*
 * XELPDP: request TCSS power on/off and wait for the transition. On a
 * failed enable (non-legacy ports only), the request is rolled back.
 * Returns true on success.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	/* Enabling also requires the PHY-ready flag getting set. */
	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* A legacy port failing here is unexpected; don't roll back. */
	if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* Nothing to roll back for a failed disable. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1053
1054static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
1055{
1056 struct drm_i915_private *i915 = tc_to_i915(tc);
1057 enum port port = tc->dig_port->base.port;
1058 u32 val;
1059
1060 assert_tc_cold_blocked(tc);
1061
1062 val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
1063 if (take)
1064 val |= XELPDP_TC_PHY_OWNERSHIP;
1065 else
1066 val &= ~XELPDP_TC_PHY_OWNERSHIP;
1067 intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
1068}
1069
/* XELPDP: whether display owns the PHY, per PORT_BUF_CTL1. */
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_cold_blocked(tc);

	return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
}
1079
/*
 * XELPDP: read out the current PHY mode, keeping TC-cold blocked (via
 * tc->lock_wakeref) if connected, and sanity-check that TCSS power is
 * enabled for owned (DP-alt/legacy) modes.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	drm_WARN_ON(&i915->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1098
/*
 * XELPDP: connect the PHY. Blocks TC-cold, then - except in TBT mode -
 * enables TCSS power, takes ownership and verifies the mode/lane
 * requirements, unwinding each step on failure. Returns true on success.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1125
/*
 * Disconnect the PHY: in DP-alt/legacy mode release PHY ownership and
 * disable the TCSS power, then - in all connected modes - unblock TC-cold.
 */
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;	/* TBT-alt only needs the TC-cold unblock */
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1141
/*
 * PHY handlers for Xe_LPD+ (display ver >= 14) platforms; the TC-cold power
 * domain, PHY-ready and init handling is reused from the TGL/ADL-P code.
 */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1152
1153/*
1154 * Generic TC PHY handlers
1155 * -----------------------
1156 */
1157static enum intel_display_power_domain
1158tc_phy_cold_off_domain(struct intel_tc_port *tc)
1159{
1160 return tc->phy_ops->cold_off_domain(tc);
1161}
1162
1163static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
1164{
1165 struct drm_i915_private *i915 = tc_to_i915(tc);
1166 u32 mask;
1167
1168 mask = tc->phy_ops->hpd_live_status(tc);
1169
1170 /* The sink can be connected only in a single mode. */
1171 drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
1172
1173 return mask;
1174}
1175
1176static bool tc_phy_is_ready(struct intel_tc_port *tc)
1177{
1178 return tc->phy_ops->is_ready(tc);
1179}
1180
1181static bool tc_phy_is_owned(struct intel_tc_port *tc)
1182{
1183 return tc->phy_ops->is_owned(tc);
1184}
1185
1186static void tc_phy_get_hw_state(struct intel_tc_port *tc)
1187{
1188 tc->phy_ops->get_hw_state(tc);
1189}
1190
1191static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
1192 bool phy_is_ready, bool phy_is_owned)
1193{
1194 struct drm_i915_private *i915 = tc_to_i915(tc);
1195
1196 drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
1197
1198 return phy_is_ready && phy_is_owned;
1199}
1200
1201static bool tc_phy_is_connected(struct intel_tc_port *tc,
1202 enum icl_port_dpll_id port_pll_type)
1203{
1204 struct intel_encoder *encoder = &tc->dig_port->base;
1205 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1206 bool phy_is_ready = tc_phy_is_ready(tc);
1207 bool phy_is_owned = tc_phy_is_owned(tc);
1208 bool is_connected;
1209
1210 if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
1211 is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
1212 else
1213 is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
1214
1215 drm_dbg_kms(&i915->drm,
1216 "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
1217 tc->port_name,
1218 str_yes_no(is_connected),
1219 str_yes_no(phy_is_ready),
1220 str_yes_no(phy_is_owned),
1221 port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
1222
1223 return is_connected;
1224}
1225
1226static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
1227{
1228 struct drm_i915_private *i915 = tc_to_i915(tc);
1229
1230 if (wait_for(tc_phy_is_ready(tc), 500)) {
1231 drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
1232 tc->port_name);
1233
1234 return false;
1235 }
1236
1237 return true;
1238}
1239
1240static enum tc_port_mode
1241hpd_mask_to_tc_mode(u32 live_status_mask)
1242{
1243 if (live_status_mask)
1244 return fls(live_status_mask) - 1;
1245
1246 return TC_PORT_DISCONNECTED;
1247}
1248
1249static enum tc_port_mode
1250tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1251{
1252 u32 live_status_mask = tc_phy_hpd_live_status(tc);
1253
1254 return hpd_mask_to_tc_mode(live_status_mask);
1255}
1256
/*
 * Resolve the TC mode while the display owns the PHY: a DP-alt or legacy
 * HPD live mode is taken as-is; for TBT-alt (unexpected while owned, see
 * the WARN in tc_phy_get_current_mode()) or no HPD the mode is derived
 * from the port type instead.
 */
static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
			       enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		return live_mode;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_TBT_ALT:
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_LEGACY;
		else
			return TC_PORT_DP_ALT;
	}
}
1276
/*
 * Resolve the TC mode while the display doesn't own the PHY: a legacy HPD
 * maps to disconnected mode, a DP-alt or TBT-alt HPD to TBT-alt mode; with
 * no HPD, legacy ports resolve to disconnected mode and all other ports to
 * TBT-alt mode.
 */
static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
				   enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
		return TC_PORT_DISCONNECTED;
	case TC_PORT_DP_ALT:
	case TC_PORT_TBT_ALT:
		return TC_PORT_TBT_ALT;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_DISCONNECTED;
		else
			return TC_PORT_TBT_ALT;
	}
}
1297
/*
 * Read out the port's current TC mode, derived from the PHY's ready/owned
 * state and the HPD live status.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* An owned PHY shouldn't report a TBT-alt HPD live status. */
		drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1335
1336static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1337{
1338 if (tc->legacy_port)
1339 return TC_PORT_LEGACY;
1340
1341 return TC_PORT_TBT_ALT;
1342}
1343
1344static enum tc_port_mode
1345hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1346{
1347 enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1348
1349 if (mode != TC_PORT_DISCONNECTED)
1350 return mode;
1351
1352 return default_tc_mode(tc);
1353}
1354
1355static enum tc_port_mode
1356tc_phy_get_target_mode(struct intel_tc_port *tc)
1357{
1358 u32 live_status_mask = tc_phy_hpd_live_status(tc);
1359
1360 return hpd_mask_to_target_mode(tc, live_status_mask);
1361}
1362
/*
 * Connect the PHY in the mode required by the current HPD live status,
 * falling back to the port's default mode if that fails. A failure to
 * connect in the fallback mode too is unexpected.
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		/* Retry in the default mode if the target mode failed. */
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(&i915->drm, !connected);
}
1381
1382static void tc_phy_disconnect(struct intel_tc_port *tc)
1383{
1384 if (tc->mode != TC_PORT_DISCONNECTED) {
1385 tc->phy_ops->disconnect(tc);
1386 tc->mode = TC_PORT_DISCONNECTED;
1387 }
1388}
1389
/* Platform specific one-time init of the PHY state, under the port's lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1396
/*
 * Reset the port's TC mode: disconnect the PHY and - unless a disconnect is
 * forced - reconnect it in the mode required by the current HPD live status.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	/* Let any pending power domain disabling work complete first. */
	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/*
		 * The AUX power domain isn't expected to be enabled at this
		 * point. NOTE(review): the exact reason isn't visible here -
		 * confirm against the power domain handling.
		 */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1423
1424static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
1425{
1426 return tc_phy_get_target_mode(tc) != tc->mode;
1427}
1428
1429static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1430 int required_lanes, bool force_disconnect)
1431{
1432 if (force_disconnect ||
1433 intel_tc_port_needs_reset(tc))
1434 intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1435}
1436
/* Get a reference locking the port's TC mode. Call with tc->lock held. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}
1441
/* Put a reference locking the port's TC mode. Call with tc->lock held. */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1446
1447static bool tc_port_is_enabled(struct intel_tc_port *tc)
1448{
1449 struct drm_i915_private *i915 = tc_to_i915(tc);
1450 struct intel_digital_port *dig_port = tc->dig_port;
1451
1452 assert_tc_port_power_enabled(tc);
1453
1454 return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
1455 DDI_BUF_CTL_ENABLE;
1456}
1457
1458/**
1459 * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1460 * @dig_port: digital port
1461 *
1462 * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1463 * will be locked until intel_tc_port_sanitize_mode() is called.
1464 */
1465void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1466{
1467 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1468 struct intel_tc_port *tc = to_tc_port(dig_port);
1469 bool update_mode = false;
1470
1471 mutex_lock(&tc->lock);
1472
1473 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
1474 drm_WARN_ON(&i915->drm, tc->lock_wakeref);
1475 drm_WARN_ON(&i915->drm, tc->link_refcount);
1476
1477 tc_phy_get_hw_state(tc);
1478 /*
1479 * Save the initial mode for the state check in
1480 * intel_tc_port_sanitize_mode().
1481 */
1482 tc->init_mode = tc->mode;
1483
1484 /*
1485 * The PHY needs to be connected for AUX to work during HW readout and
1486 * MST topology resume, but the PHY mode can only be changed if the
1487 * port is disabled.
1488 *
1489 * An exception is the case where BIOS leaves the PHY incorrectly
1490 * disconnected on an enabled legacy port. Work around that by
1491 * connecting the PHY even though the port is enabled. This doesn't
1492 * cause a problem as the PHY ownership state is ignored by the
1493 * IOM/TCSS firmware (only display can own the PHY in that case).
1494 */
1495 if (!tc_port_is_enabled(tc)) {
1496 update_mode = true;
1497 } else if (tc->mode == TC_PORT_DISCONNECTED) {
1498 drm_WARN_ON(&i915->drm, !tc->legacy_port);
1499 drm_err(&i915->drm,
1500 "Port %s: PHY disconnected on enabled port, connecting it\n",
1501 tc->port_name);
1502 update_mode = true;
1503 }
1504
1505 if (update_mode)
1506 intel_tc_port_update_mode(tc, 1, false);
1507
1508 /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1509 __intel_tc_port_get_link(tc);
1510
1511 mutex_unlock(&tc->lock);
1512}
1513
/*
 * Whether the port has any active links, logging an error if the PHY state
 * isn't consistent with that.
 */
static bool tc_port_has_active_links(struct intel_tc_port *tc,
				     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_links = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_links = 1;
	}

	if (active_links && !tc_phy_is_connected(tc, pll_type))
		drm_err(&i915->drm,
			"Port %s: PHY disconnected with %d active link(s)\n",
			tc->port_name, active_links);

	return active_links;
}
1537
1538/**
1539 * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1540 * @dig_port: digital port
1541 * @crtc_state: atomic state of CRTC connected to @dig_port
1542 *
1543 * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1544 * loading and system resume:
1545 * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1546 * the encoder is disabled.
1547 * If the encoder is disabled make sure the PHY is disconnected.
1548 * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1549 */
1550void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1551 const struct intel_crtc_state *crtc_state)
1552{
1553 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1554 struct intel_tc_port *tc = to_tc_port(dig_port);
1555
1556 mutex_lock(&tc->lock);
1557
1558 drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
1559 if (!tc_port_has_active_links(tc, crtc_state)) {
1560 /*
1561 * TBT-alt is the default mode in any case the PHY ownership is not
1562 * held (regardless of the sink's connected live state), so
1563 * we'll just switch to disconnected mode from it here without
1564 * a note.
1565 */
1566 if (tc->init_mode != TC_PORT_TBT_ALT &&
1567 tc->init_mode != TC_PORT_DISCONNECTED)
1568 drm_dbg_kms(&i915->drm,
1569 "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1570 tc->port_name,
1571 tc_port_mode_name(tc->init_mode));
1572 tc_phy_disconnect(tc);
1573 __intel_tc_port_put_link(tc);
1574 }
1575
1576 drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
1577 tc->port_name,
1578 tc_port_mode_name(tc->mode));
1579
1580 mutex_unlock(&tc->lock);
1581}
1582
1583/*
1584 * The type-C ports are different because even when they are connected, they may
1585 * not be available/usable by the graphics driver: see the comment on
1586 * icl_tc_phy_connect(). So in our driver instead of adding the additional
1587 * concept of "usable" and make everything check for "connected and usable" we
1588 * define a port as "connected" when it is not only connected, but also when it
1589 * is usable by the rest of the driver. That maintains the old assumption that
1590 * connected ports are usable, and avoids exposing to the users objects they
1591 * can't really use.
1592 */
bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 mask = ~0;

	/* The port's TC mode must be locked while checking the live status. */
	drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));

	/* While connected, only an HPD matching the current mode counts. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		mask = BIT(tc->mode);

	return tc_phy_hpd_live_status(tc) & mask;
}
1607
/* As intel_tc_port_connected_locked(), acquiring the port's TC lock. */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool is_connected;

	mutex_lock(&tc->lock);
	is_connected = intel_tc_port_connected_locked(encoder);
	mutex_unlock(&tc->lock);

	return is_connected;
}
1620
/*
 * Whether the port's link needs to be reset: true only for an active
 * (reference-holding) DP-alt mode link whose target mode no longer matches
 * the current one.
 */
static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
	bool ret;

	mutex_lock(&tc->lock);

	ret = tc->link_refcount &&
	      tc->mode == TC_PORT_DP_ALT &&
	      intel_tc_port_needs_reset(tc);

	mutex_unlock(&tc->lock);

	return ret;
}
1635
1636bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1637{
1638 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1639 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1640
1641 if (!intel_phy_is_tc(i915, phy))
1642 return false;
1643
1644 return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1645}
1646
/*
 * Add all pipes with an active link on the port to the atomic @state,
 * marking them with connectors_changed, and commit the state if the link
 * (still) needs the reset.
 *
 * Returns 0 on success or if the reset became unnecessary meanwhile, an
 * error code otherwise (-EDEADLK is handled by the caller's locking context
 * retry loop).
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	/* Nothing to reset without active pipes. */
	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->uapi.connectors_changed = true;
	}

	/* Recheck, since the state may have changed while locks were dropped. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1684
/*
 * Reset the port's active link by committing an atomic state that forces a
 * modeset on all of its pipes.
 */
static int reset_link(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(&i915->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	state->internal = true;

	/* Retry the commit on modeset lock contention (-EDEADLK backoff). */
	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1707
/*
 * Delayed work resetting an active DP-alt mode link whose sink got
 * disconnected, done under the modeset lock.
 */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct drm_i915_private *i915 = tc_to_i915(tc);
	int ret;

	/* The reset may have become unnecessary since it was scheduled. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&i915->drm.mode_config.mutex);

	drm_dbg_kms(&i915->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(&i915->drm, ret);

	mutex_unlock(&i915->drm.mode_config.mutex);
}
1728
/*
 * Schedule a delayed reset of @dig_port's link if it needs one.
 *
 * Returns true if the reset work got scheduled.
 */
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
{
	if (!intel_tc_port_link_needs_reset(dig_port))
		return false;

	queue_delayed_work(system_unbound_wq,
			   &to_tc_port(dig_port)->link_reset_work,
			   msecs_to_jiffies(2000));

	return true;
}
1740
/* Cancel a pending link reset work on @dig_port; no-op for non-TC PHYs. */
void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_phy_is_tc(i915, phy))
		return;

	cancel_delayed_work(&tc->link_reset_work);
}
1752
/*
 * Lock the port's TC state, canceling any pending PHY disconnect work and -
 * unless an active link holds a mode-locking reference - updating the mode
 * to match the current HPD live status. tc->lock is left held on return.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	mutex_lock(&tc->lock);

	cancel_delayed_work(&tc->disconnect_phy_work);

	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	/* While locked the PHY must be connected, and - except in TBT-alt mode - owned. */
	drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(tc));
}
1770
/* Lock the port's TC state, requiring at least 1 lane to be available. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(to_tc_port(dig_port), 1);
}
1775
1776/*
1777 * Disconnect the given digital port from its TypeC PHY (handing back the
1778 * control of the PHY to the TypeC subsystem). This will happen in a delayed
1779 * manner after each aux transactions and modeset disables.
1780 */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, disconnect_phy_work.work);

	mutex_lock(&tc->lock);

	/* Only disconnect if no active link holds a mode-locking reference. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, 1, true);

	mutex_unlock(&tc->lock);
}
1793
1794/**
1795 * intel_tc_port_flush_work: flush the work disconnecting the PHY
1796 * @dig_port: digital port
1797 *
1798 * Flush the delayed work disconnecting an idle PHY.
1799 */
1800static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1801{
1802 flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1803}
1804
/* Cancel/flush the port's pending TC works, for system suspend or cleanup. */
void intel_tc_port_suspend(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	cancel_delayed_work_sync(&tc->link_reset_work);
	intel_tc_port_flush_work(dig_port);
}
1812
/*
 * Unlock the port's TC state, scheduling its PHY to be disconnected after a
 * 1 second delay if no active link keeps the mode locked.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1823
/*
 * Whether the port's TC mode is locked, either via tc->lock being held or
 * via an active link's reference.
 */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return mutex_is_locked(&tc->lock) ||
	       tc->link_refcount;
}
1831
/*
 * Get a link reference on @dig_port, locking its TC mode after connecting
 * the PHY with @required_lanes if needed.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1841
/*
 * Put a link reference on @dig_port, unlocking its TC mode and - if the PHY
 * became idle - disconnecting the PHY synchronously before returning.
 */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1858
1859int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
1860{
1861 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1862 struct intel_tc_port *tc;
1863 enum port port = dig_port->base.port;
1864 enum tc_port tc_port = intel_port_to_tc(i915, port);
1865
1866 if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
1867 return -EINVAL;
1868
1869 tc = kzalloc(sizeof(*tc), GFP_KERNEL);
1870 if (!tc)
1871 return -ENOMEM;
1872
1873 dig_port->tc = tc;
1874 tc->dig_port = dig_port;
1875
1876 if (DISPLAY_VER(i915) >= 14)
1877 tc->phy_ops = &xelpdp_tc_phy_ops;
1878 else if (DISPLAY_VER(i915) >= 13)
1879 tc->phy_ops = &adlp_tc_phy_ops;
1880 else if (DISPLAY_VER(i915) >= 12)
1881 tc->phy_ops = &tgl_tc_phy_ops;
1882 else
1883 tc->phy_ops = &icl_tc_phy_ops;
1884
1885 tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
1886 tc_port + 1);
1887 if (!tc->port_name) {
1888 kfree(tc);
1889 return -ENOMEM;
1890 }
1891
1892 mutex_init(&tc->lock);
1893 /* TODO: Combine the two works */
1894 INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
1895 INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
1896 tc->legacy_port = is_legacy;
1897 tc->mode = TC_PORT_DISCONNECTED;
1898 tc->link_refcount = 0;
1899
1900 tc_phy_init(tc);
1901
1902 intel_tc_port_init_mode(dig_port);
1903
1904 return 0;
1905}
1906
/* Free the TypeC port state of @dig_port, quiescing its pending work first. */
void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
{
	intel_tc_port_suspend(dig_port);

	kfree(dig_port->tc->port_name);
	kfree(dig_port->tc);
	dig_port->tc = NULL;
}