// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS		6U
/*
 * Number of credits we try to allocate for each DMA path if not limited
 * by the host router baMaxHI.
 */
#define TB_DMA_CREDITS			14U
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS		1U

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],                  \
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
	return port->total_credits - port->ctl_credits;
}

/**
 * tb_available_credits() - Available credits for PCIe and DMA
 * @port: Lane adapter to check
 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
 *		    streams possible through this lane adapter
 */
static unsigned int tb_available_credits(const struct tb_port *port,
					 size_t *max_dp_streams)
{
	const struct tb_switch *sw = port->sw;
	int credits, usb3, pcie, spare;
	size_t ndp;

	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;

	if (tb_acpi_is_xdomain_allowed()) {
		spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
		/* Add some credits for potential second DMA tunnel */
		spare += TB_MIN_DMA_CREDITS;
	} else {
		spare = 0;
	}

	credits = tb_usable_credits(port);
	if (tb_acpi_may_tunnel_dp()) {
		/*
		 * Maximum number of DP streams possible through the
		 * lane adapter.
		 */
		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
			ndp = (credits - (usb3 + pcie + spare)) /
			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
		else
			ndp = 0;
	} else {
		ndp = 0;
	}
	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
	credits -= usb3;

	if (max_dp_streams)
		*max_dp_streams = ndp;

	return credits > 0 ? credits : 0;
}
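
/*
 * Worked example of the arithmetic above (all numbers are made up for
 * illustration, not taken from real hardware): assume
 * tb_usable_credits() returns 60, max_usb3_credits = 14,
 * max_pcie_credits = 6 and the DMA spare is 14 + 1 = 15. With 1 AUX +
 * 12 main credits needed per DP stream:
 *
 *	ndp = (60 - (14 + 6 + 15)) / (1 + 12) = 25 / 13 = 1 stream
 *
 * and the credits left for PCIe/DMA are 60 - 1 * 13 - 14 = 33.
 */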

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
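
/*
 * A minimal usage sketch (the helper name below is hypothetical; in the
 * driver the connection manager in tb.c drives this): allocate a PCIe
 * tunnel between the adapter pair and activate it. On activation
 * failure the paths have already been deactivated, so freeing the
 * tunnel is enough.
 */
static int __maybe_unused tb_pci_tunnel_example(struct tb *tb,
						struct tb_port *up,
						struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	int ret;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret)
		tb_tunnel_free(tunnel);

	return ret;
}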

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}
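
/*
 * For example, HBR2 x4 gives 5400 * 4 * 8 / 10 = 17280 Mb/s and
 * RBR x1 gives 1620 * 1 * 8 / 10 = 1296 Mb/s, matching the dp_bw[]
 * table below.
 */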

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
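
/*
 * Example: if both adapters are HBR2 x4 capable (17280 Mb/s) but only
 * 10000 Mb/s is available, the first table entry that satisfies both
 * the adapter limits and max_bw is { 2700, 4 } and the tunnel runs at
 * 8640 Mb/s. If even { 1620, 1 } does not fit, -ENOSR is returned.
 */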

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_port_dbg(out, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should already be set
		 * for an active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (!timeout)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	tb_path_for_each_hop(path, hop)
		tb_dp_init_aux_credits(hop);
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}

static int tb_dp_init_video_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @link_nr: Preferred lane adapter when the link is not bonded
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     link_nr, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		while (credits > available)
			credits--;

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so we can use
	 * all the credits (except the ones reserved for control
	 * traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}

static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
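
/*
 * Illustrative call only (the HopIDs and ring numbers here are made-up
 * example values; the XDomain code picks the real ones): set up both
 * directions of a DMA tunnel using HopID 8 on the inter-domain link
 * and ring 1 on the NHI:
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 1);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	ret = tb_tunnel_activate(tunnel);
 */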

/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}
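
/*
 * For example, to find the tunnel that belongs to a given RX ring
 * regardless of how its TX side is configured, pass %-1 for the
 * arguments that should be ignored:
 *
 *	if (tb_tunnel_match_dma(tunnel, -1, -1, receive_path,
 *				receive_ring))
 *		tb_tunnel_deactivate(tunnel);
 */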

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	int pcie_enabled = tb_acpi_may_tunnel_pcie();

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take that into account here.
	 */
	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
	return 0;
}
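
/*
 * Example: with 3000/3000 Mb/s allocated, the tunnel reports
 * 3000 * (3 + 1) / 3 = 4000 Mb/s consumed in each direction when PCIe
 * tunneling is enabled, and the plain 3000 Mb/s when it is not.
 */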

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
		return;
	} else if (!ret) {
		/* Use the maximum link rate if the link valid bit is not set */
		ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
		if (ret < 0) {
			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
			return;
		}
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop)
		tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
 * moment) this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt driver - Tunneling support
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
7 */
8
9#include <linux/delay.h>
10#include <linux/slab.h>
11#include <linux/list.h>
12#include <linux/ktime.h>
13#include <linux/string_helpers.h>
14
15#include "tunnel.h"
16#include "tb.h"
17
18/* PCIe adapters use always HopID of 8 for both directions */
19#define TB_PCI_HOPID 8
20
21#define TB_PCI_PATH_DOWN 0
22#define TB_PCI_PATH_UP 1
23
24#define TB_PCI_PRIORITY 3
25#define TB_PCI_WEIGHT 1
26
27/* USB3 adapters use always HopID of 8 for both directions */
28#define TB_USB3_HOPID 8
29
30#define TB_USB3_PATH_DOWN 0
31#define TB_USB3_PATH_UP 1
32
33#define TB_USB3_PRIORITY 3
34#define TB_USB3_WEIGHT 2
35
36/* DP adapters use HopID 8 for AUX and 9 for Video */
37#define TB_DP_AUX_TX_HOPID 8
38#define TB_DP_AUX_RX_HOPID 8
39#define TB_DP_VIDEO_HOPID 9
40
41#define TB_DP_VIDEO_PATH_OUT 0
42#define TB_DP_AUX_PATH_OUT 1
43#define TB_DP_AUX_PATH_IN 2
44
45#define TB_DP_VIDEO_PRIORITY 1
46#define TB_DP_VIDEO_WEIGHT 1
47
48#define TB_DP_AUX_PRIORITY 2
49#define TB_DP_AUX_WEIGHT 1
50
51/* Minimum number of credits needed for PCIe path */
52#define TB_MIN_PCIE_CREDITS 6U
53/*
54 * Number of credits we try to allocate for each DMA path if not limited
55 * by the host router baMaxHI.
56 */
57#define TB_DMA_CREDITS 14
58/* Minimum number of credits for DMA path */
59#define TB_MIN_DMA_CREDITS 1
60
61#define TB_DMA_PRIORITY 5
62#define TB_DMA_WEIGHT 1
63
64/*
65 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
66 * according to USB4 v2 Connection Manager guide. This ends up reserving
67 * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
68 * account.
69 */
70#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
71#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
72
73static unsigned int dma_credits = TB_DMA_CREDITS;
74module_param(dma_credits, uint, 0444);
75MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
76 __MODULE_STRING(TB_DMA_CREDITS) ")");
77
78static bool bw_alloc_mode = true;
79module_param(bw_alloc_mode, bool, 0444);
80MODULE_PARM_DESC(bw_alloc_mode,
81 "enable bandwidth allocation mode if supported (default: true)");
82
83static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
84
85static inline unsigned int tb_usable_credits(const struct tb_port *port)
86{
87 return port->total_credits - port->ctl_credits;
88}
89
90/**
91 * tb_available_credits() - Available credits for PCIe and DMA
92 * @port: Lane adapter to check
93 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
94 * streams possible through this lane adapter
95 */
96static unsigned int tb_available_credits(const struct tb_port *port,
97 size_t *max_dp_streams)
98{
99 const struct tb_switch *sw = port->sw;
100 int credits, usb3, pcie, spare;
101 size_t ndp;
102
103 usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
104 pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
105
106 if (tb_acpi_is_xdomain_allowed()) {
107 spare = min_not_zero(sw->max_dma_credits, dma_credits);
108 /* Add some credits for potential second DMA tunnel */
109 spare += TB_MIN_DMA_CREDITS;
110 } else {
111 spare = 0;
112 }
113
114 credits = tb_usable_credits(port);
115 if (tb_acpi_may_tunnel_dp()) {
116 /*
117 * Maximum number of DP streams possible through the
118 * lane adapter.
119 */
120 if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
121 ndp = (credits - (usb3 + pcie + spare)) /
122 (sw->min_dp_aux_credits + sw->min_dp_main_credits);
123 else
124 ndp = 0;
125 } else {
126 ndp = 0;
127 }
128 credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
129 credits -= usb3;
130
131 if (max_dp_streams)
132 *max_dp_streams = ndp;
133
134 return credits > 0 ? credits : 0;
135}
136
137static void tb_init_pm_support(struct tb_path_hop *hop)
138{
139 struct tb_port *out_port = hop->out_port;
140 struct tb_port *in_port = hop->in_port;
141
142 if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
143 usb4_switch_version(in_port->sw) >= 2)
144 hop->pm_support = true;
145}
146
147static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
148 enum tb_tunnel_type type)
149{
150 struct tb_tunnel *tunnel;
151
152 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
153 if (!tunnel)
154 return NULL;
155
156 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
157 if (!tunnel->paths) {
158 tb_tunnel_free(tunnel);
159 return NULL;
160 }
161
162 INIT_LIST_HEAD(&tunnel->list);
163 tunnel->tb = tb;
164 tunnel->npaths = npaths;
165 tunnel->type = type;
166
167 return tunnel;
168}
169
170static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
171{
172 struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
173 int ret;
174
175 /* Only supported of both routers are at least USB4 v2 */
176 if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
177 (usb4_switch_version(tunnel->dst_port->sw) < 2))
178 return 0;
179
180 if (enable && tb_port_get_link_generation(port) < 4)
181 return 0;
182
183 ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
184 if (ret)
185 return ret;
186
187 /*
188 * Downstream router could be unplugged so disable of encapsulation
189 * in upstream router is still possible.
190 */
191 ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
192 if (ret) {
193 if (enable)
194 return ret;
195 if (ret != -ENODEV)
196 return ret;
197 }
198
199 tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
200 str_enabled_disabled(enable));
201 return 0;
202}
203
204static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
205{
206 int res;
207
208 if (activate) {
209 res = tb_pci_set_ext_encapsulation(tunnel, activate);
210 if (res)
211 return res;
212 }
213
214 if (activate)
215 res = tb_pci_port_enable(tunnel->dst_port, activate);
216 else
217 res = tb_pci_port_enable(tunnel->src_port, activate);
218 if (res)
219 return res;
220
221
222 if (activate) {
223 res = tb_pci_port_enable(tunnel->src_port, activate);
224 if (res)
225 return res;
226 } else {
227 /* Downstream router could be unplugged */
228 tb_pci_port_enable(tunnel->dst_port, activate);
229 }
230
231 return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
232}

static int tb_pci_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available;

		available = tb_available_credits(port, NULL);
		credits = min(sw->max_pcie_credits, available);

		if (credits < TB_MIN_PCIE_CREDITS)
			return -ENOSPC;

		credits = max(TB_MIN_PCIE_CREDITS, credits);
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
	return 0;
}

static int tb_pci_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_PCI_PRIORITY;
	path->weight = TB_PCI_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_pci_init_credits(hop);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
		goto err_free;

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
		goto err_deactivate;

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type %TB_TYPE_PCIE_UP
 * and %TB_TYPE_PCIE_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path)
		goto err_free;
	tunnel->paths[TB_PCI_PATH_UP] = path;
	if (tb_pci_init_path(path))
		goto err_free;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

/**
 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
 * @port: Lane 0 adapter
 * @reserved_up: Upstream bandwidth in Mb/s to reserve
 * @reserved_down: Downstream bandwidth in Mb/s to reserve
 *
 * Can be called for any connected lane 0 adapter to find out how much
 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
 * Returns true if there is something to be reserved and writes the
 * amount to @reserved_down/@reserved_up. Otherwise returns false and
 * does not touch the parameters.
 */
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down)
{
	if (WARN_ON_ONCE(!port->remote))
		return false;

	if (!tb_acpi_may_tunnel_pcie())
		return false;

	if (tb_port_get_link_generation(port) < 4)
		return false;

	/* Must have PCIe adapters */
	if (tb_is_upstream_port(port)) {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
			return false;
	} else {
		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
			return false;
		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
			return false;
	}

	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;

	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
		    *reserved_down);
	return true;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
			      int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

/*
 * Returns maximum possible rate from capability supporting only DP 2.0
 * and below. Used when DP BW allocation mode is not enabled.
 */
static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

/*
 * Returns maximum possible rate from capability supporting DP 2.1
 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
 * mode is enabled.
 */
static inline u32 tb_dp_cap_get_rate_ext(u32 val)
{
	if (val & DP_COMMON_CAP_UHBR20)
		return 20000;
	else if (val & DP_COMMON_CAP_UHBR13_5)
		return 13500;
	else if (val & DP_COMMON_CAP_UHBR10)
		return 10000;

	return tb_dp_cap_get_rate(val);
}

static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
{
	return rate >= 10000;
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b or 128b/132b encoding overhead */
	if (tb_dp_is_uhbr_rate(rate))
		return rate * lanes * 128 / 132;
	return rate * lanes * 8 / 10;
}
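
/*
 * Worked example of the formula above: an HBR2 x2 stream carries
 * 5400 * 2 * 8 / 10 = 8640 Mb/s of usable bandwidth once the 8b/10b
 * encoding is removed, while UHBR10 x4 carries
 * 10000 * 4 * 128 / 132 = 38787 Mb/s (integer division) with 128b/132b
 * encoding.
 */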

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
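
/*
 * Example walk of the table above (hypothetical inputs): with
 * max_bw = 10000 Mb/s, a DP IN capable of HBR3 x4 and a DP OUT capable
 * of HBR2 x4, the entries whose rate exceeds HBR2 are skipped on the
 * OUT side, { 5400, 4 } = 17280 Mb/s does not fit, and the first fit
 * is { 2700, 4 } = 8640 Mb/s after encoding.
 */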

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out, 3000);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      out_rate, out_lanes, bw);

	if (tb_port_path_direction_downstream(in, out))
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_tunnel_info(tunnel, "not enough bandwidth\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_tunnel_dbg(tunnel,
			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			      new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	/*
	 * Titan Ridge does not disable AUX timers when it gets
	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
	 * DP tunneling.
	 */
	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
	int ret, estimated_bw, granularity, tmp;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	u32 out_dp_cap, out_rate, out_lanes;
	u32 in_dp_cap, in_rate, in_lanes;
	u32 rate, lanes;

	if (!bw_alloc_mode)
		return 0;

	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, in->group->index);
	if (ret)
		return ret;

	/*
	 * Get the non-reduced rate and lanes based on the lowest
	 * capability of both adapters.
	 */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);

	rate = min(in_rate, out_rate);
	lanes = min(in_lanes, out_lanes);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

	ret = usb4_dp_port_set_nrd(in, rate, lanes);
	if (ret)
		return ret;

	/*
	 * Pick up granularity that supports maximum possible bandwidth.
	 * For that we use the UHBR rates too.
	 */
	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
	rate = min(in_rate, out_rate);
	tmp = tb_dp_bandwidth(rate, lanes);

	tb_tunnel_dbg(tunnel,
		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tmp);

	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;
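
	/*
	 * Example of the loop above (illustrative numbers): granularity
	 * doubles from 250 Mb/s until tmp fits into 255 granularity
	 * units. For UHBR20 x4, tmp = 20000 * 4 * 128 / 132 = 77575
	 * Mb/s: 77575 / 250 = 310 > 255, so granularity becomes 500 and
	 * 77575 / 500 = 155 <= 255 terminates the loop.
	 */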

	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);

	/*
	 * Returns -EINVAL if granularity above is outside of the
	 * accepted ranges.
	 */
	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/*
	 * Bandwidth estimation is pretty much what we have in
	 * max_up/down fields. For discovery we just read what the
	 * estimation was set to.
	 */
	if (tb_port_path_direction_downstream(in, out))
		estimated_bw = tunnel->max_down;
	else
		estimated_bw = tunnel->max_up;

	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);

	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
	if (ret)
		return ret;

	/* Initial allocation should be 0 according to the spec */
	ret = usb4_dp_port_allocate_bandwidth(in, 0);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
	return 0;
}

static int tb_dp_init(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	struct tb_switch *sw = in->sw;
	struct tb *tb = in->sw->tb;
	int ret;

	ret = tb_dp_xchg_caps(tunnel);
	if (ret)
		return ret;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return 0;

	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");

	ret = usb4_dp_port_set_cm_id(in, tb->index);
	if (ret)
		return ret;

	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}

static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return;
	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
	}
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

/* max_bw is rounded up to the next granularity step */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
						  int *max_bw)
{
	struct tb_port *in = tunnel->src_port;
	int ret, rate, lanes, nrd_bw;
	u32 cap;

	/*
	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
	 * read parameter values, so we can use it to determine the
	 * maximum possible bandwidth over this link.
	 *
	 * See USB4 v2 spec 1.0 10.4.4.5.
	 */
	ret = tb_port_read(in, &cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	rate = tb_dp_cap_get_rate_ext(cap);
	if (tb_dp_is_uhbr_rate(rate)) {
		/*
		 * When UHBR is used there is no reduction in lanes so
		 * we can use this directly.
		 */
		lanes = tb_dp_cap_get_lanes(cap);
	} else {
		/*
		 * If there is no UHBR supported then check the
		 * non-reduced rate and lanes.
		 */
		ret = usb4_dp_port_nrd(in, &rate, &lanes);
		if (ret)
			return ret;
	}

	nrd_bw = tb_dp_bandwidth(rate, lanes);

	if (max_bw) {
		ret = usb4_dp_port_granularity(in);
		if (ret < 0)
			return ret;
		*max_bw = roundup(nrd_bw, ret);
	}

	return nrd_bw;
}
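
/*
 * Illustrative example of the rounding above (made-up values): with a
 * non-reduced bandwidth of HBR2 x4 = 17280 Mb/s and a granularity of
 * 500 Mb/s, *max_bw is set to roundup(17280, 500) = 17500 Mb/s while
 * 17280 itself is returned as nrd_bw.
 */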

static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
						   int *consumed_up,
						   int *consumed_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, allocated_bw, max_bw;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	if (!tunnel->bw_mode)
		return -EOPNOTSUPP;

	/* Read what was allocated previously if any */
	ret = usb4_dp_port_allocated_bandwidth(in);
	if (ret < 0)
		return ret;
	allocated_bw = ret;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;
	if (allocated_bw == max_bw)
		allocated_bw = ret;

	if (tb_port_path_direction_downstream(in, out)) {
		*consumed_up = 0;
		*consumed_down = allocated_bw;
	} else {
		*consumed_up = allocated_bw;
		*consumed_down = 0;
	}

	return 0;
}

static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				     int *allocated_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;

	/*
	 * If we have already set the allocated bandwidth then use that.
	 * Otherwise we read it from the DPRX.
	 */
	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
		int ret, allocated_bw, max_bw;

		ret = usb4_dp_port_allocated_bandwidth(in);
		if (ret < 0)
			return ret;
		allocated_bw = ret;

		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
		if (ret < 0)
			return ret;
		if (allocated_bw == max_bw)
			allocated_bw = ret;

		if (tb_port_path_direction_downstream(in, out)) {
			*allocated_up = 0;
			*allocated_down = allocated_bw;
		} else {
			*allocated_up = allocated_bw;
			*allocated_down = 0;
		}
		return 0;
	}

	return tunnel->consumed_bandwidth(tunnel, allocated_up,
					  allocated_down);
}

static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
				 int *alloc_down)
{
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int max_bw, ret, tmp;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, out)) {
		tmp = min(*alloc_down, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = tmp;
		*alloc_up = 0;
	} else {
		tmp = min(*alloc_up, max_bw);
		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
		if (ret)
			return ret;
		*alloc_down = 0;
		*alloc_up = tmp;
	}

	/* Now we can use BW mode registers to figure out the bandwidth */
	/* TODO: need to handle discovery too */
	tunnel->bw_mode = true;
	return 0;
}

static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	struct tb_port *in = tunnel->src_port;

	/*
	 * Wait for DPRX done. Normally it should be already set for an
	 * active tunnel.
	 */
	do {
		u32 val;
		int ret;

		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_COMMON_CAP, 1);
		if (ret)
			return ret;

		if (val & DP_COMMON_CAP_DPRX_DONE) {
			tb_tunnel_dbg(tunnel, "DPRX read done\n");
			return 0;
		}
		usleep_range(100, 150);
	} while (ktime_before(ktime_get(), timeout));

	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
	return -ETIMEDOUT;
}

/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
			  u32 *lanes)
{
	struct tb_port *in = tunnel->src_port;
	u32 val;
	int ret;

	switch (cap) {
	case DP_LOCAL_CAP:
	case DP_REMOTE_CAP:
	case DP_COMMON_CAP:
		break;

	default:
		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
		return -EINVAL;
	}

	/*
	 * Read from the copied remote cap so that we take into account
	 * if capabilities were reduced during exchange.
	 */
	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
	if (ret)
		return ret;

	*rate = tb_dp_cap_get_rate(val);
	*lanes = tb_dp_cap_get_lanes(val);
	return 0;
}

static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				   int *max_down)
{
	struct tb_port *in = tunnel->src_port;
	int ret;

	if (!usb4_dp_port_bandwidth_mode_enabled(in))
		return -EOPNOTSUPP;

	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
	if (ret < 0)
		return ret;

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*max_up = 0;
		*max_down = ret;
	} else {
		*max_up = ret;
		*max_down = 0;
	}

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		/*
		 * On USB4 routers check if the bandwidth allocation
		 * mode is enabled first and then read the bandwidth
		 * through those registers.
		 */
		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
							      consumed_down);
		if (ret < 0) {
			if (ret != -EOPNOTSUPP)
				return ret;
		} else if (!ret) {
			return 0;
		}
		/*
		 * Then see if the DPRX negotiation is ready and if yes
		 * return that bandwidth (it may be smaller than the
		 * reduced one). Otherwise return the remote (possibly
		 * reduced) caps.
		 */
		ret = tb_dp_wait_dprx(tunnel, 150);
		if (ret) {
			if (ret == -ETIMEDOUT)
				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
						     &rate, &lanes);
			if (ret)
				return ret;
		} else {
			ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate,
					     &lanes);
			if (ret)
				return ret;
		}
	} else if (sw->generation >= 2) {
		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
		if (ret)
			return ret;
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}

static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port))
		hop->initial_credits = sw->min_dp_aux_credits;
	else
		hop->initial_credits = 1;
}

static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_AUX_PRIORITY;
	path->weight = TB_DP_AUX_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		tb_dp_init_aux_credits(hop);
		if (pm_support)
			tb_init_pm_support(hop);
	}
}

static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int nfc_credits;
		size_t max_dp_streams;

		tb_available_credits(port, &max_dp_streams);
		/*
		 * Read the number of currently allocated NFC credits
		 * from the lane adapter. Since we only use them for DP
		 * tunneling we can use that to figure out how many DP
		 * tunnels already go through the lane adapter.
		 */
		nfc_credits = port->config.nfc_credits &
				ADP_CS_4_NFC_BUFFERS_MASK;
		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
			return -ENOSPC;

		hop->nfc_credits = sw->min_dp_main_credits;
	} else {
		hop->nfc_credits = min(port->total_credits - 2, 12U);
	}

	return 0;
}

static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DP_VIDEO_PRIORITY;
	path->weight = TB_DP_VIDEO_WEIGHT;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dp_init_video_credits(hop);
		if (ret)
			return ret;
		if (pm_support)
			tb_init_pm_support(hop);
	}

	return 0;
}

static void tb_dp_dump(struct tb_tunnel *tunnel)
{
	struct tb_port *in, *out;
	u32 dp_cap, rate, lanes;

	in = tunnel->src_port;
	out = tunnel->dst_port;

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
			 out->cap_adap + DP_LOCAL_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel,
		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));

	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
			 in->cap_adap + DP_REMOTE_CAP, 1))
		return;

	rate = tb_dp_cap_get_rate(dp_cap);
	lanes = tb_dp_cap_get_lanes(dp_cap);

	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
		      rate, lanes, tb_dp_bandwidth(rate, lanes));
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video", alloc_hopid);
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
		goto err_free;

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
				alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_dp_dump(tunnel);

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @link_nr: Preferred lane adapter when the link is not bonded
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;
	bool pm_support;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_init;
	tunnel->deinit = tb_dp_deinit;
	tunnel->activate = tb_dp_activate;
	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;
	pm_support = usb4_switch_version(in->sw) >= 2;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     link_nr, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, pm_support);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path, pm_support);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
	const struct tb_switch *sw = port->sw;
	int credits;

	credits = tb_available_credits(port, NULL);
	if (tb_acpi_may_tunnel_pcie())
		credits -= sw->max_pcie_credits;
	credits -= port->dma_credits;

	return credits > 0 ? credits : 0;
}

static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		unsigned int available = tb_dma_available_credits(port);

		/*
		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * DMA path cannot be established.
		 */
		if (available < TB_MIN_DMA_CREDITS)
			return -ENOSPC;

		credits = min(credits, available);

		tb_port_dbg(port, "reserving %u credits for DMA path\n",
			    credits);

		port->dma_credits += credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 14 : 6;
		else
			credits = min(port->total_credits, credits);
	}

	hop->initial_credits = credits;
	return 0;
}
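
/*
 * Example (hypothetical numbers): if 14 credits are requested but the
 * lane adapter only has 10 available, 10 are reserved; if fewer than
 * TB_MIN_DMA_CREDITS are available, the DMA path cannot be established
 * at all and -ENOSPC is returned.
 */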

/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;
	unsigned int i, tmp;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	/*
	 * First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so we can use
	 * all the credits (except the ones reserved for control
	 * traffic).
	 */
	hop = &path->hops[0];
	tmp = min(tb_usable_credits(hop->in_port), credits);
	hop->initial_credits = tmp;
	hop->in_port->dma_credits += tmp;

	for (i = 1; i < path->path_length; i++) {
		int ret;

		ret = tb_dma_reserve_credits(&path->hops[i], credits);
		if (ret)
			return ret;
	}

	return 0;
}

/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_ALL;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_DMA_PRIORITY;
	path->weight = TB_DMA_WEIGHT;
	path->clear_fc = true;

	tb_path_for_each_hop(path, hop) {
		int ret;

		ret = tb_dma_reserve_credits(hop, credits);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_dma_release_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;

	if (tb_port_use_credit_allocation(port)) {
		port->dma_credits -= hop->initial_credits;

		tb_port_dbg(port, "released %u DMA path credits\n",
			    hop->initial_credits);
	}
}

static void tb_dma_deinit_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_dma_release_credits(hop);
}

static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		tb_dma_deinit_path(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Set to %-1 if TX path is not needed.
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Set to %-1 if RX path is not needed.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring)
{
	struct tb_tunnel *tunnel;
	size_t npaths = 0, i = 0;
	struct tb_path *path;
	int credits;

	/* Ring 0 is reserved for the control channel */
	if (WARN_ON(!receive_ring || !transmit_ring))
		return NULL;

	if (receive_ring > 0)
		npaths++;
	if (transmit_ring > 0)
		npaths++;

	if (WARN_ON(!npaths))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
	tunnel->deinit = tb_dma_deinit;

	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);

	if (receive_ring > 0) {
		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
				     "DMA RX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_rx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
			goto err_free;
		}
	}

	if (transmit_ring > 0) {
		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
				     "DMA TX");
		if (!path)
			goto err_free;
		tunnel->paths[i++] = path;
		if (tb_dma_init_tx_path(path, credits)) {
			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
			goto err_free;
		}
	}

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}
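
/*
 * Usage sketch (illustrative, error handling elided): an RX-only DMA
 * tunnel can be created by passing -1 for the TX side, e.g.
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, -1, -1, 8, 1);
 *
 * which allocates only the "DMA RX" path from @dst HopID 8 to NHI
 * ring 1.
 */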

/**
 * tb_tunnel_match_dma() - Match DMA tunnel
 * @tunnel: Tunnel to match
 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain. Pass %-1 to ignore.
 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain. Pass %-1 to ignore.
 *
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns true if there is a match and false otherwise.
 */
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring)
{
	const struct tb_path *tx_path = NULL, *rx_path = NULL;
	int i;

	if (!receive_ring || !transmit_ring)
		return false;

	for (i = 0; i < tunnel->npaths; i++) {
		const struct tb_path *path = tunnel->paths[i];

		if (!path)
			continue;

		if (tb_port_is_nhi(path->hops[0].in_port))
			tx_path = path;
		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
			rx_path = path;
	}

	if (transmit_ring > 0 || transmit_path > 0) {
		if (!tx_path)
			return false;
		if (transmit_ring > 0 &&
		    (tx_path->hops[0].in_hop_index != transmit_ring))
			return false;
		if (transmit_path > 0 &&
		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
			return false;
	}

	if (receive_ring > 0 || receive_path > 0) {
		if (!rx_path)
			return false;
		if (receive_path > 0 &&
		    (rx_path->hops[0].in_hop_index != receive_path))
			return false;
		if (receive_ring > 0 &&
		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
			return false;
	}

	return true;
}
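
/*
 * For example (illustrative), to match only the tunnel that transmits
 * through HopID 8, ignoring the rings and the RX side:
 *
 *	if (tb_tunnel_match_dma(tunnel, 8, -1, -1, -1))
 *		...
 */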

static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
	int ret, up_max_rate, down_max_rate;

	ret = usb4_usb3_port_max_link_rate(up);
	if (ret < 0)
		return ret;
	up_max_rate = ret;

	ret = usb4_usb3_port_max_link_rate(down);
	if (ret < 0)
		return ret;
	down_max_rate = ret;

	return min(up_max_rate, down_max_rate);
}

static int tb_usb3_init(struct tb_tunnel *tunnel)
{
	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);

	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
						 &tunnel->allocated_up,
						 &tunnel->allocated_down);
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
				      int *consumed_up, int *consumed_down)
{
	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;

	/*
	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take that into account here.
	 */
	*consumed_up = tunnel->allocated_up *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
	*consumed_down = tunnel->allocated_down *
		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;

	if (tb_port_get_link_generation(port) >= 4) {
		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
	}

	return 0;
}
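
/*
 * Worked example of the scaling above, assuming illustrative weights
 * TB_USB3_WEIGHT = 3 and TB_PCI_WEIGHT = 1: 900 Mb/s of allocated USB3
 * bandwidth is reported as 900 * (3 + 1) / 3 = 1200 Mb/s consumed when
 * PCIe tunneling is enabled, and as 900 Mb/s when it is not.
 */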

static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	int ret;

	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
					       &tunnel->allocated_up,
					       &tunnel->allocated_down);
	if (ret)
		return ret;

	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
	return 0;
}

static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
						int *available_up,
						int *available_down)
{
	int ret, max_rate, allocate_up, allocate_down;

	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
	if (ret < 0) {
		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
		return;
	}

	/*
	 * 90% of the max rate can be allocated for isochronous
	 * transfers.
	 */
	max_rate = ret * 90 / 100;

	/* No need to reclaim if already at maximum */
	if (tunnel->allocated_up >= max_rate &&
	    tunnel->allocated_down >= max_rate)
		return;

	/* Don't go lower than what is already allocated */
	allocate_up = min(max_rate, *available_up);
	if (allocate_up < tunnel->allocated_up)
		allocate_up = tunnel->allocated_up;

	allocate_down = min(max_rate, *available_down);
	if (allocate_down < tunnel->allocated_down)
		allocate_down = tunnel->allocated_down;

	/* If no changes, no need to do more */
	if (allocate_up == tunnel->allocated_up &&
	    allocate_down == tunnel->allocated_down)
		return;

	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
						&allocate_down);
	if (ret) {
		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
		return;
	}

	tunnel->allocated_up = allocate_up;
	*available_up -= tunnel->allocated_up;

	tunnel->allocated_down = allocate_down;
	*available_down -= tunnel->allocated_down;

	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
		      tunnel->allocated_up, tunnel->allocated_down);
}
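
/*
 * Example (illustrative): on a 10 Gb/s USB3 link, max_rate above
 * becomes 10000 * 90 / 100 = 9000 Mb/s, so the tunnel never claims
 * more than 9000 Mb/s in either direction regardless of how much is
 * available.
 */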

static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
	struct tb_port *port = hop->in_port;
	struct tb_switch *sw = port->sw;
	unsigned int credits;

	if (tb_port_use_credit_allocation(port)) {
		credits = sw->max_usb3_credits;
	} else {
		if (tb_port_is_null(port))
			credits = port->bonded ? 32 : 16;
		else
			credits = 7;
	}

	hop->initial_credits = credits;
}

static void tb_usb3_init_path(struct tb_path *path)
{
	struct tb_path_hop *hop;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = TB_USB3_PRIORITY;
	path->weight = TB_USB3_WEIGHT;
	path->drop_packages = 0;

	tb_path_for_each_hop(path, hop)
		tb_usb3_init_credits(hop);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 * @alloc_hopid: Allocate HopIDs from visited ports
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Down", alloc_hopid);
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Up", alloc_hopid);
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_route(down->sw)) {
		int ret;

		/*
		 * Read the initial bandwidth allocation for the first
		 * hop tunnel.
		 */
		ret = usb4_usb3_port_allocated_bandwidth(down,
			&tunnel->allocated_up, &tunnel->allocated_down);
		if (ret)
			goto err_deactivate;

		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
			      tunnel->allocated_up, tunnel->allocated_down);

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
 *	    if not limited).
 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
 *	      (%0 if not limited).
 *
 * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP
 * and %TB_TYPE_USB3_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	int max_rate = 0;

	/*
	 * Check that we have enough bandwidth available for the new
	 * USB3 tunnel.
	 */
	if (max_up > 0 || max_down > 0) {
		max_rate = tb_usb3_max_link_rate(down, up);
		if (max_rate < 0)
			return NULL;

		/* Only 90% can be allocated for USB3 isochronous transfers */
		max_rate = max_rate * 90 / 100;
		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
			    max_rate);

		if (max_rate > max_up || max_rate > max_down) {
			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
			return NULL;
		}
	}

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	if (!tb_route(down->sw)) {
		tunnel->allocated_up = max_rate;
		tunnel->allocated_down = max_rate;

		tunnel->init = tb_usb3_init;
		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
		tunnel->release_unused_bandwidth =
			tb_usb3_release_unused_bandwidth;
		tunnel->reclaim_available_bandwidth =
			tb_usb3_reclaim_available_bandwidth;
	}

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	if (tunnel->deinit)
		tunnel->deinit(tunnel);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}
2259
/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_port_on_path() - Does the tunnel go through port
 * @tunnel: Tunnel to check
 * @port: Port to check
 *
 * Returns true if @tunnel goes through @port (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;

		if (tb_path_port_on_path(tunnel->paths[i], port))
			return true;
	}

	return false;
}

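/*
 * Illustrative sketch (not part of the driver): finding the tunnel
 * that runs through a given (e.g. just unplugged) port. tunnel_list
 * and my_find_tunnel() are assumptions for illustration:
 *
 *	static struct tb_tunnel *my_find_tunnel(struct list_head *tunnel_list,
 *						const struct tb_port *port)
 *	{
 *		struct tb_tunnel *tunnel;
 *
 *		list_for_each_entry(tunnel, tunnel_list, list) {
 *			if (tb_tunnel_port_on_path(tunnel, port))
 *				return tunnel;
 *		}
 *		return NULL;
 *	}
 */
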
static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
 * @tunnel: Tunnel to check
 * @max_up: Maximum upstream bandwidth in Mb/s
 * @max_down: Maximum downstream bandwidth in Mb/s
 *
 * Returns the maximum possible bandwidth this tunnel can consume if
 * not limited by other bandwidth clients. If the tunnel does not
 * support this, returns %-EOPNOTSUPP.
 */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->maximum_bandwidth)
		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
	return -EOPNOTSUPP;
}

/**
 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
 * @tunnel: Tunnel to check
 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
 *		    stored here
 *
 * Returns the bandwidth allocated for the tunnel. This may be higher
 * than what the tunnel actually consumes.
 */
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->allocated_bandwidth)
		return tunnel->allocated_bandwidth(tunnel, allocated_up,
						   allocated_down);
	return -EOPNOTSUPP;
}

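/*
 * Illustrative sketch (not part of the driver): comparing the current
 * allocation against the tunnel maximum, e.g. before growing it. Both
 * calls return %-EOPNOTSUPP for tunnel types without these callbacks:
 *
 *	int max_up, max_down, up, down;
 *
 *	if (!tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down) &&
 *	    !tb_tunnel_allocated_bandwidth(tunnel, &up, &down))
 *		tb_tunnel_dbg(tunnel, "using %d/%d of %d/%d Mb/s\n",
 *			      up, down, max_up, max_down);
 */
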
/**
 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
 * @tunnel: Tunnel whose bandwidth allocation to change
 * @alloc_up: New upstream bandwidth in Mb/s
 * @alloc_down: New downstream bandwidth in Mb/s
 *
 * Tries to change tunnel bandwidth allocation. If it succeeds, returns
 * %0 and updates @alloc_up and @alloc_down to what was actually
 * allocated (which may differ from the values passed in). Returns
 * negative errno in case of failure.
 */
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return -EINVAL;

	if (tunnel->alloc_bandwidth)
		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);

	return -EOPNOTSUPP;
}

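/*
 * Illustrative sketch (not part of the driver): @alloc_up/@alloc_down
 * are in/out parameters, so read them back after the call to learn
 * what was actually granted. The 2000 Mb/s request is an arbitrary
 * example value:
 *
 *	int up = 2000, down = 2000, ret;
 *
 *	ret = tb_tunnel_alloc_bandwidth(tunnel, &up, &down);
 *	if (!ret)
 *		tb_tunnel_dbg(tunnel, "granted %d/%d Mb/s\n", up, down);
 */
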
/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
 *		 Can be %NULL.
 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
 *		   Can be %NULL.
 *
 * Stores the amount of isochronous bandwidth @tunnel consumes in
 * @consumed_up and @consumed_down. In case of success returns %0,
 * negative errno otherwise.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down)
{
	int up_bw = 0, down_bw = 0;

	if (!tb_tunnel_is_active(tunnel))
		goto out;

	if (tunnel->consumed_bandwidth) {
		int ret;

		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
		if (ret)
			return ret;

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
			      down_bw);
	}

out:
	if (consumed_up)
		*consumed_up = up_bw;
	if (consumed_down)
		*consumed_down = down_bw;

	return 0;
}

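/*
 * Illustrative sketch (not part of the driver): summing what all
 * tunnels consume, e.g. as input for computing the remaining bandwidth
 * on a link. tunnel_list and my_total_consumed() are assumptions:
 *
 *	static int my_total_consumed(struct list_head *tunnel_list,
 *				     int *up, int *down)
 *	{
 *		struct tb_tunnel *tunnel;
 *		int u, d, ret;
 *
 *		*up = *down = 0;
 *		list_for_each_entry(tunnel, tunnel_list, list) {
 *			ret = tb_tunnel_consumed_bandwidth(tunnel, &u, &d);
 *			if (ret)
 *				return ret;
 *			*up += u;
 *			*down += d;
 *		}
 *		return 0;
 *	}
 */
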
/**
 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
 * @tunnel: Tunnel whose unused bandwidth to release
 *
 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
 * the moment), this function makes it release all the unused bandwidth.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->release_unused_bandwidth) {
		int ret;

		ret = tunnel->release_unused_bandwidth(tunnel);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
 * @tunnel: Tunnel reclaiming available bandwidth
 * @available_up: Available upstream bandwidth (in Mb/s)
 * @available_down: Available downstream bandwidth (in Mb/s)
 *
 * Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
 * reclaimed by the tunnel). If nothing was reclaimed the values are
 * kept as is.
 */
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down)
{
	if (!tb_tunnel_is_active(tunnel))
		return;

	if (tunnel->reclaim_available_bandwidth)
		tunnel->reclaim_available_bandwidth(tunnel, available_up,
						    available_down);
}

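/*
 * Illustrative sketch (not part of the driver): the release/reclaim
 * pair is meant to be used as a sequence. First every tunnel releases
 * what it does not use, then the freed-up budget is handed back. The
 * loops below are an assumption of how a connection manager might do
 * it; my_available_bandwidth() is hypothetical:
 *
 *	list_for_each_entry(tunnel, tunnel_list, list)
 *		tb_tunnel_release_unused_bandwidth(tunnel);
 *
 *	my_available_bandwidth(&avail_up, &avail_down);
 *
 *	list_for_each_entry(tunnel, tunnel_list, list)
 *		tb_tunnel_reclaim_available_bandwidth(tunnel, &avail_up,
 *						      &avail_down);
 */
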
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
{
	return tb_tunnel_names[tunnel->type];
}