// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}

static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}

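/*
 * Illustrative sketch (not part of the driver): how the per-port LC
 * capability offset above resolves. The DESC values below are
 * hypothetical and only demonstrate the arithmetic; lane adapters 1/2
 * map to physical port 0 and adapters 3/4 to physical port 1.
 */
#if 0
static int example_port_lc_offset(void)
{
	unsigned int cap_lc = 0x80;	/* hypothetical LC capability offset */
	unsigned int start = 8;		/* common LC register area, from DESC */
	unsigned int size = 0x40;	/* registers per physical port, from DESC */
	unsigned int phys = 1;		/* e.g. lane adapter 3 or 4 */

	return cap_lc + start + phys * size;	/* 0x80 + 8 + 0x40 = 0xc8 */
}
#endif
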
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}

static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 on success and negative errno on failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

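/*
 * Usage sketch (not part of this file): a connection manager would
 * typically pick between the two configuration paths above depending on
 * what sits behind the downstream lane adapter. The helper below is
 * hypothetical and only illustrates the intended pairing; port->xdomain
 * is assumed to be set when the remote end is another host.
 */
#if 0
static int example_lc_link_up(struct tb_port *down)
{
	/* Another host (XDomain) behind the port */
	if (down->xdomain)
		return tb_lc_configure_xdomain(down);

	/* A regular device router behind the port */
	return tb_lc_configure_port(down);
}
#endif
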
/**
 * tb_lc_start_lane_initialization() - Start lane initialization
 * @port: Device router lane 0 adapter
 *
 * Starts lane initialization for @port after the router resumed from
 * sleep. Should be called for those downstream lane adapters that were
 * not connected (tb_lc_configure_port() was not called) before sleep.
 *
 * Returns %0 on success and negative errno in case of failure.
 */
int tb_lc_start_lane_initialization(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int ret, cap;
	u32 ctrl;

	if (!tb_route(sw))
		return 0;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl |= TB_LC_SX_CTRL_SLI;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

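/*
 * Usage sketch (not part of this file): on the resume path a connection
 * manager could walk the router and restart lane initialization on the
 * lane adapters that had nothing connected before sleep. The helper is
 * hypothetical; tb_port_is_null() is assumed to be the lane adapter test
 * from tb.h and port->remote/port->xdomain to be NULL for unconnected
 * adapters.
 */
#if 0
static void example_lc_resume(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		/* Only unconnected downstream lane adapters need this */
		if (!tb_port_is_null(port) || tb_is_upstream_port(port))
			continue;
		if (port->remote || port->xdomain)
			continue;
		tb_lc_start_lane_initialization(port);
	}
}
#endif
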
/**
 * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
 * @port: Lane adapter
 *
 * The TB_LC_LINK_ATTR_CPS bit reflects whether the link supports CLx,
 * including any active cables (if connected on the link).
 */
bool tb_lc_is_clx_supported(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_LINK_ATTR_CPS);
}

/**
 * tb_lc_is_usb_plugged() - Is there a USB device connected to the port
 * @port: Device router lane 0 adapter
 *
 * Returns true if @port has a USB Type-C device connected.
 */
bool tb_lc_is_usb_plugged(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	if (sw->generation != 3)
		return false;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_CS_42, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_CS_42_USB_PLUGGED);
}

/**
 * tb_lc_is_xhci_connected() - Is the internal xHCI connected
 * @port: Device router lane 0 adapter
 *
 * Returns true if the internal xHCI has been connected to @port.
 */
bool tb_lc_is_xhci_connected(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	if (sw->generation != 3)
		return false;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_LINK_REQ_XHCI_CONNECT);
}

static int __tb_lc_xhci_connect(struct tb_port *port, bool connect)
{
	struct tb_switch *sw = port->sw;
	int cap, ret;
	u32 val;

	if (sw->generation != 3)
		return -EINVAL;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
	if (ret)
		return ret;

	if (connect)
		val |= TB_LC_LINK_REQ_XHCI_CONNECT;
	else
		val &= ~TB_LC_LINK_REQ_XHCI_CONNECT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
}

/**
 * tb_lc_xhci_connect() - Connect internal xHCI
 * @port: Device router lane 0 adapter
 *
 * Tells LC to connect the internal xHCI to @port. Returns %0 on success
 * and negative errno in case of failure. Can be called for Thunderbolt 3
 * routers only.
 */
int tb_lc_xhci_connect(struct tb_port *port)
{
	int ret;

	ret = __tb_lc_xhci_connect(port, true);
	if (ret)
		return ret;

	tb_port_dbg(port, "xHCI connected\n");
	return 0;
}

/**
 * tb_lc_xhci_disconnect() - Disconnect internal xHCI
 * @port: Device router lane 0 adapter
 *
 * Tells LC to disconnect the internal xHCI from @port. Can be called
 * for Thunderbolt 3 routers only.
 */
void tb_lc_xhci_disconnect(struct tb_port *port)
{
	__tb_lc_xhci_connect(port, false);
	tb_port_dbg(port, "xHCI disconnected\n");
}

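/*
 * Usage sketch (not part of this file): on a Thunderbolt 3 router the
 * internal xHCI would typically be handed the port only when a USB
 * Type-C device is actually plugged. The helper below is hypothetical
 * and just pairs the query and connect/disconnect calls above.
 */
#if 0
static void example_lc_handle_usb(struct tb_port *port)
{
	bool plugged = tb_lc_is_usb_plugged(port);

	if (plugged && !tb_lc_is_xhci_connected(port))
		tb_lc_xhci_connect(port);
	else if (!plugged && tb_lc_is_xhci_connected(port))
		tb_lc_xhci_disconnect(port);
}
#endif
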
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Clear all wake bits first and then enable the ones requested
	 * in @flags (wake on connect, USB4, PCIe and DP).
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC |
		  TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;
	if (flags & TB_WAKE_ON_DP)
		ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * Sets the wake bits of each link controller accordingly.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

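/*
 * Usage sketch (not part of this file): on the suspend path the wake
 * sources are programmed first and the sleep bit is set last, roughly
 * as below. The helper and the chosen wake flags are hypothetical.
 */
#if 0
static int example_lc_suspend(struct tb_switch *sw)
{
	int ret;

	ret = tb_lc_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_USB4);
	if (ret)
		return ret;

	return tb_lc_set_sleep(sw);
}
#endif
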
/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether the conditions for bonding the lanes from the parent
 * to @sw are met.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}

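/*
 * Usage sketch (not part of this file): the check above would gate lane
 * bonding before the connection manager enables it. The helper is
 * hypothetical and tb_switch_lane_bonding_enable() is assumed to be the
 * bonding entry point declared in tb.h.
 */
#if 0
static int example_lc_bond_lanes(struct tb_switch *sw)
{
	if (!tb_lc_lane_bonding_possible(sw))
		return -EOPNOTSUPP;

	return tb_switch_lane_bonding_enable(sw);
}
#endif
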
static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}

static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned
 * if the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

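/*
 * Usage sketch (not part of this file): DP tunnel setup on a Thunderbolt
 * 3 router would claim the sink before building the tunnel and release
 * it again on teardown. The helpers are hypothetical and only show the
 * intended query/alloc/dealloc pairing.
 */
#if 0
static int example_lc_dp_claim_sink(struct tb_switch *sw, struct tb_port *in)
{
	if (!tb_lc_dp_sink_query(sw, in))
		return -EBUSY;

	return tb_lc_dp_sink_alloc(sw, in);
}

static void example_lc_dp_release_sink(struct tb_switch *sw, struct tb_port *in)
{
	/* Called when the DP tunnel towards @in is torn down */
	tb_lc_dp_sink_dealloc(sw, in);
}
#endif
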
/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let the authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}