   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - Tunneling support
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2019, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/slab.h>
  11#include <linux/list.h>
  12#include <linux/ktime.h>
  13#include <linux/string_helpers.h>
  14
  15#include "tunnel.h"
  16#include "tb.h"
  17
   18/* PCIe adapters always use HopID 8 for both directions */
  19#define TB_PCI_HOPID			8
  20
  21#define TB_PCI_PATH_DOWN		0
  22#define TB_PCI_PATH_UP			1
  23
  24#define TB_PCI_PRIORITY			3
  25#define TB_PCI_WEIGHT			1
  26
   27/* USB3 adapters always use HopID 8 for both directions */
  28#define TB_USB3_HOPID			8
  29
  30#define TB_USB3_PATH_DOWN		0
  31#define TB_USB3_PATH_UP			1
  32
  33#define TB_USB3_PRIORITY		3
  34#define TB_USB3_WEIGHT			2
  35
  36/* DP adapters use HopID 8 for AUX and 9 for Video */
  37#define TB_DP_AUX_TX_HOPID		8
  38#define TB_DP_AUX_RX_HOPID		8
  39#define TB_DP_VIDEO_HOPID		9
  40
  41#define TB_DP_VIDEO_PATH_OUT		0
  42#define TB_DP_AUX_PATH_OUT		1
  43#define TB_DP_AUX_PATH_IN		2
  44
  45#define TB_DP_VIDEO_PRIORITY		1
  46#define TB_DP_VIDEO_WEIGHT		1
  47
  48#define TB_DP_AUX_PRIORITY		2
  49#define TB_DP_AUX_WEIGHT		1
  50
  51/* Minimum number of credits needed for PCIe path */
  52#define TB_MIN_PCIE_CREDITS		6U
  53/*
  54 * Number of credits we try to allocate for each DMA path if not limited
  55 * by the host router baMaxHI.
  56 */
  57#define TB_DMA_CREDITS			14
  58/* Minimum number of credits for DMA path */
  59#define TB_MIN_DMA_CREDITS		1
  60
  61#define TB_DMA_PRIORITY			5
  62#define TB_DMA_WEIGHT			1
  63
  64/*
  65 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
  66 * according to USB4 v2 Connection Manager guide. This ends up reserving
  67 * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
  68 * account.
  69 */
  70#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
  71#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
  72
  73static unsigned int dma_credits = TB_DMA_CREDITS;
  74module_param(dma_credits, uint, 0444);
  75MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
  76                __MODULE_STRING(TB_DMA_CREDITS) ")");
  77
  78static bool bw_alloc_mode = true;
  79module_param(bw_alloc_mode, bool, 0444);
  80MODULE_PARM_DESC(bw_alloc_mode,
  81		 "enable bandwidth allocation mode if supported (default: true)");
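
/*
 * Both parameters above are read-only (0444) module parameters, so they
 * can only be given at module load time. A hypothetical invocation:
 *
 *   modprobe thunderbolt dma_credits=8 bw_alloc_mode=0
 */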
  82
  83static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
  84
  85static inline unsigned int tb_usable_credits(const struct tb_port *port)
  86{
  87	return port->total_credits - port->ctl_credits;
  88}
  89
  90/**
  91 * tb_available_credits() - Available credits for PCIe and DMA
  92 * @port: Lane adapter to check
  93 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
  94 *		    streams possible through this lane adapter
  95 */
  96static unsigned int tb_available_credits(const struct tb_port *port,
  97					 size_t *max_dp_streams)
  98{
  99	const struct tb_switch *sw = port->sw;
 100	int credits, usb3, pcie, spare;
 101	size_t ndp;
 102
 103	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
 104	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
 105
 106	if (tb_acpi_is_xdomain_allowed()) {
 107		spare = min_not_zero(sw->max_dma_credits, dma_credits);
  108		/* Add some credits for a potential second DMA tunnel */
 109		spare += TB_MIN_DMA_CREDITS;
 110	} else {
 111		spare = 0;
 112	}
 113
 114	credits = tb_usable_credits(port);
 115	if (tb_acpi_may_tunnel_dp()) {
 116		/*
 117		 * Maximum number of DP streams possible through the
 118		 * lane adapter.
 119		 */
 120		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
 121			ndp = (credits - (usb3 + pcie + spare)) /
 122			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
 123		else
 124			ndp = 0;
 125	} else {
 126		ndp = 0;
 127	}
 128	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
 129	credits -= usb3;
 130
 131	if (max_dp_streams)
 132		*max_dp_streams = ndp;
 133
 134	return credits > 0 ? credits : 0;
 135}
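
/*
 * Worked example with hypothetical numbers: 60 usable credits on the
 * lane adapter, 20 reserved for USB3, 10 for PCIe, 15 spare for DMA,
 * and 1 AUX + 12 main credits per DP stream. Then
 * ndp = (60 - (20 + 10 + 15)) / (1 + 12) = 1 stream, and the function
 * returns 60 - 1 * 13 - 20 = 27 credits.
 */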
 136
 137static void tb_init_pm_support(struct tb_path_hop *hop)
 138{
 139	struct tb_port *out_port = hop->out_port;
 140	struct tb_port *in_port = hop->in_port;
 141
 142	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
 143	    usb4_switch_version(in_port->sw) >= 2)
 144		hop->pm_support = true;
 145}
 146
 147static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
 148					 enum tb_tunnel_type type)
 149{
 150	struct tb_tunnel *tunnel;
 151
 152	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
 153	if (!tunnel)
 154		return NULL;
 155
 156	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
 157	if (!tunnel->paths) {
 158		tb_tunnel_free(tunnel);
 159		return NULL;
 160	}
 161
 162	INIT_LIST_HEAD(&tunnel->list);
 163	tunnel->tb = tb;
 164	tunnel->npaths = npaths;
 165	tunnel->type = type;
 166
 167	return tunnel;
 168}
 169
 170static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
 171{
 172	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
 173	int ret;
 174
  175	/* Only supported if both routers are at least USB4 v2 */
  176	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
  177	    (usb4_switch_version(tunnel->dst_port->sw) < 2))
 178		return 0;
 179
 180	if (enable && tb_port_get_link_generation(port) < 4)
 181		return 0;
 182
 183	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
 184	if (ret)
 185		return ret;
 186
 187	/*
  188	 * Downstream router could be unplugged, so disabling encapsulation
  189	 * in the upstream router must still be possible.
 190	 */
 191	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
 192	if (ret) {
 193		if (enable)
 194			return ret;
 195		if (ret != -ENODEV)
 196			return ret;
 197	}
 198
 199	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
 200		      str_enabled_disabled(enable));
 201	return 0;
 202}
 203
 204static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
 205{
 206	int res;
 207
 208	if (activate) {
 209		res = tb_pci_set_ext_encapsulation(tunnel, activate);
 210		if (res)
 211			return res;
 212	}
 213
 214	if (activate)
 215		res = tb_pci_port_enable(tunnel->dst_port, activate);
 216	else
 217		res = tb_pci_port_enable(tunnel->src_port, activate);
 218	if (res)
 219		return res;
 220
 221
 222	if (activate) {
 223		res = tb_pci_port_enable(tunnel->src_port, activate);
 224		if (res)
 225			return res;
 226	} else {
 227		/* Downstream router could be unplugged */
 228		tb_pci_port_enable(tunnel->dst_port, activate);
 229	}
 230
 231	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
 232}
 233
 234static int tb_pci_init_credits(struct tb_path_hop *hop)
 235{
 236	struct tb_port *port = hop->in_port;
 237	struct tb_switch *sw = port->sw;
 238	unsigned int credits;
 239
 240	if (tb_port_use_credit_allocation(port)) {
 241		unsigned int available;
 242
 243		available = tb_available_credits(port, NULL);
 244		credits = min(sw->max_pcie_credits, available);
 245
 246		if (credits < TB_MIN_PCIE_CREDITS)
 247			return -ENOSPC;
 248
 249		credits = max(TB_MIN_PCIE_CREDITS, credits);
 250	} else {
 251		if (tb_port_is_null(port))
 252			credits = port->bonded ? 32 : 16;
 253		else
 254			credits = 7;
 255	}
 256
 257	hop->initial_credits = credits;
 258	return 0;
 259}
 260
 261static int tb_pci_init_path(struct tb_path *path)
 262{
 263	struct tb_path_hop *hop;
 264
 265	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 266	path->egress_shared_buffer = TB_PATH_NONE;
 267	path->ingress_fc_enable = TB_PATH_ALL;
 268	path->ingress_shared_buffer = TB_PATH_NONE;
 269	path->priority = TB_PCI_PRIORITY;
 270	path->weight = TB_PCI_WEIGHT;
 271	path->drop_packages = 0;
 272
 273	tb_path_for_each_hop(path, hop) {
 274		int ret;
 275
 276		ret = tb_pci_init_credits(hop);
 277		if (ret)
 278			return ret;
 279	}
 280
 281	return 0;
 282}
 283
 284/**
 285 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 286 * @tb: Pointer to the domain structure
 287 * @down: PCIe downstream adapter
 288 * @alloc_hopid: Allocate HopIDs from visited ports
 289 *
 290 * If @down adapter is active, follows the tunnel to the PCIe upstream
 291 * adapter and back. Returns the discovered tunnel or %NULL if there was
 292 * no tunnel.
 293 */
 294struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
 295					 bool alloc_hopid)
 296{
 297	struct tb_tunnel *tunnel;
 298	struct tb_path *path;
 299
 300	if (!tb_pci_port_is_enabled(down))
 301		return NULL;
 302
 303	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 304	if (!tunnel)
 305		return NULL;
 306
 307	tunnel->activate = tb_pci_activate;
 308	tunnel->src_port = down;
 309
 310	/*
 311	 * Discover both paths even if they are not complete. We will
 312	 * clean them up by calling tb_tunnel_deactivate() below in that
 313	 * case.
 314	 */
 315	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
 316				&tunnel->dst_port, "PCIe Up", alloc_hopid);
 317	if (!path) {
 318		/* Just disable the downstream port */
 319		tb_pci_port_enable(down, false);
 320		goto err_free;
 321	}
 322	tunnel->paths[TB_PCI_PATH_UP] = path;
 323	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
 324		goto err_free;
 325
 326	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
 327				"PCIe Down", alloc_hopid);
 328	if (!path)
 329		goto err_deactivate;
 330	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 331	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
 332		goto err_deactivate;
 333
 334	/* Validate that the tunnel is complete */
 335	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
 336		tb_port_warn(tunnel->dst_port,
 337			     "path does not end on a PCIe adapter, cleaning up\n");
 338		goto err_deactivate;
 339	}
 340
 341	if (down != tunnel->src_port) {
 342		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
 343		goto err_deactivate;
 344	}
 345
 346	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
 347		tb_tunnel_warn(tunnel,
 348			       "tunnel is not fully activated, cleaning up\n");
 349		goto err_deactivate;
 350	}
 351
 352	tb_tunnel_dbg(tunnel, "discovered\n");
 353	return tunnel;
 354
 355err_deactivate:
 356	tb_tunnel_deactivate(tunnel);
 357err_free:
 358	tb_tunnel_free(tunnel);
 359
 360	return NULL;
 361}
 362
 363/**
 364 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 365 * @tb: Pointer to the domain structure
 366 * @up: PCIe upstream adapter port
 367 * @down: PCIe downstream adapter port
 368 *
 369 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 370 * TB_TYPE_PCIE_DOWN.
 371 *
 372 * Return: Returns a tb_tunnel on success or NULL on failure.
 373 */
 374struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 375				      struct tb_port *down)
 376{
 377	struct tb_tunnel *tunnel;
 378	struct tb_path *path;
 379
 380	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 381	if (!tunnel)
 382		return NULL;
 383
 384	tunnel->activate = tb_pci_activate;
 385	tunnel->src_port = down;
 386	tunnel->dst_port = up;
 387
 388	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
 389			     "PCIe Down");
 390	if (!path)
 391		goto err_free;
 392	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 393	if (tb_pci_init_path(path))
 394		goto err_free;
 395
 396	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
 397			     "PCIe Up");
 398	if (!path)
 399		goto err_free;
 400	tunnel->paths[TB_PCI_PATH_UP] = path;
 401	if (tb_pci_init_path(path))
 402		goto err_free;
 403
 404	return tunnel;
 405
 406err_free:
 407	tb_tunnel_free(tunnel);
 408	return NULL;
 409}
 410
 411/**
 412 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
 413 * @port: Lane 0 adapter
 414 * @reserved_up: Upstream bandwidth in Mb/s to reserve
 415 * @reserved_down: Downstream bandwidth in Mb/s to reserve
 416 *
  417 * Can be called for any connected lane 0 adapter to find out how much
 418 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
 419 * Returns true if there is something to be reserved and writes the
 420 * amount to @reserved_down/@reserved_up. Otherwise returns false and
 421 * does not touch the parameters.
 422 */
 423bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
 424			    int *reserved_down)
 425{
 426	if (WARN_ON_ONCE(!port->remote))
 427		return false;
 428
 429	if (!tb_acpi_may_tunnel_pcie())
 430		return false;
 431
 432	if (tb_port_get_link_generation(port) < 4)
 433		return false;
 434
 435	/* Must have PCIe adapters */
 436	if (tb_is_upstream_port(port)) {
 437		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
 438			return false;
 439		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
 440			return false;
 441	} else {
 442		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
 443			return false;
 444		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
 445			return false;
 446	}
 447
 448	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
 449	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
 450
 451	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
 452		    *reserved_down);
 453	return true;
 454}
 455
 456static bool tb_dp_is_usb4(const struct tb_switch *sw)
 457{
 458	/* Titan Ridge DP adapters need the same treatment as USB4 */
 459	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
 460}
 461
 462static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
 463			      int timeout_msec)
 464{
 465	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
 466	u32 val;
 467	int ret;
 468
 469	/* Both ends need to support this */
 470	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
 471		return 0;
 472
 473	ret = tb_port_read(out, &val, TB_CFG_PORT,
 474			   out->cap_adap + DP_STATUS_CTRL, 1);
 475	if (ret)
 476		return ret;
 477
 478	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
 479
 480	ret = tb_port_write(out, &val, TB_CFG_PORT,
 481			    out->cap_adap + DP_STATUS_CTRL, 1);
 482	if (ret)
 483		return ret;
 484
 485	do {
 486		ret = tb_port_read(out, &val, TB_CFG_PORT,
 487				   out->cap_adap + DP_STATUS_CTRL, 1);
 488		if (ret)
 489			return ret;
 490		if (!(val & DP_STATUS_CTRL_CMHS))
 491			return 0;
 492		usleep_range(100, 150);
 493	} while (ktime_before(ktime_get(), timeout));
 494
 495	return -ETIMEDOUT;
 496}
 497
 498/*
 499 * Returns maximum possible rate from capability supporting only DP 2.0
 500 * and below. Used when DP BW allocation mode is not enabled.
 501 */
 502static inline u32 tb_dp_cap_get_rate(u32 val)
 503{
 504	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
 505
 506	switch (rate) {
 507	case DP_COMMON_CAP_RATE_RBR:
 508		return 1620;
 509	case DP_COMMON_CAP_RATE_HBR:
 510		return 2700;
 511	case DP_COMMON_CAP_RATE_HBR2:
 512		return 5400;
 513	case DP_COMMON_CAP_RATE_HBR3:
 514		return 8100;
 515	default:
 516		return 0;
 517	}
 518}
 519
 520/*
 521 * Returns maximum possible rate from capability supporting DP 2.1
 522 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
 523 * mode is enabled.
 524 */
 525static inline u32 tb_dp_cap_get_rate_ext(u32 val)
 526{
 527	if (val & DP_COMMON_CAP_UHBR20)
 528		return 20000;
 529	else if (val & DP_COMMON_CAP_UHBR13_5)
 530		return 13500;
 531	else if (val & DP_COMMON_CAP_UHBR10)
 532		return 10000;
 533
 534	return tb_dp_cap_get_rate(val);
 535}
 536
 537static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
 538{
 539	return rate >= 10000;
 540}
 541
 542static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
 543{
 544	val &= ~DP_COMMON_CAP_RATE_MASK;
 545	switch (rate) {
 546	default:
  547		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
 548		fallthrough;
 549	case 1620:
 550		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
 551		break;
 552	case 2700:
 553		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
 554		break;
 555	case 5400:
 556		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
 557		break;
 558	case 8100:
 559		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
 560		break;
 561	}
 562	return val;
 563}
 564
 565static inline u32 tb_dp_cap_get_lanes(u32 val)
 566{
 567	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
 568
 569	switch (lanes) {
 570	case DP_COMMON_CAP_1_LANE:
 571		return 1;
 572	case DP_COMMON_CAP_2_LANES:
 573		return 2;
 574	case DP_COMMON_CAP_4_LANES:
 575		return 4;
 576	default:
 577		return 0;
 578	}
 579}
 580
 581static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
 582{
 583	val &= ~DP_COMMON_CAP_LANES_MASK;
 584	switch (lanes) {
 585	default:
 586		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
 587		     lanes);
 588		fallthrough;
 589	case 1:
 590		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
 591		break;
 592	case 2:
 593		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
 594		break;
 595	case 4:
 596		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
 597		break;
 598	}
 599	return val;
 600}
 601
 602static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
 603{
  604	/* Tunneling removes the DP 8b/10b or 128b/132b encoding overhead */
 605	if (tb_dp_is_uhbr_rate(rate))
 606		return rate * lanes * 128 / 132;
 607	return rate * lanes * 8 / 10;
 608}
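
/*
 * For example, HBR3 x4 tunnels 8100 * 4 * 8 / 10 = 25920 Mb/s once the
 * 8b/10b overhead is removed, while UHBR20 x4 tunnels
 * 20000 * 4 * 128 / 132 = 77575 Mb/s once the 128b/132b overhead is
 * removed.
 */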
 609
 610static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
 611				  u32 out_rate, u32 out_lanes, u32 *new_rate,
 612				  u32 *new_lanes)
 613{
 614	static const u32 dp_bw[][2] = {
 615		/* Mb/s, lanes */
 616		{ 8100, 4 }, /* 25920 Mb/s */
 617		{ 5400, 4 }, /* 17280 Mb/s */
 618		{ 8100, 2 }, /* 12960 Mb/s */
 619		{ 2700, 4 }, /* 8640 Mb/s */
 620		{ 5400, 2 }, /* 8640 Mb/s */
 621		{ 8100, 1 }, /* 6480 Mb/s */
 622		{ 1620, 4 }, /* 5184 Mb/s */
 623		{ 5400, 1 }, /* 4320 Mb/s */
 624		{ 2700, 2 }, /* 4320 Mb/s */
 625		{ 1620, 2 }, /* 2592 Mb/s */
 626		{ 2700, 1 }, /* 2160 Mb/s */
 627		{ 1620, 1 }, /* 1296 Mb/s */
 628	};
 629	unsigned int i;
 630
 631	/*
 632	 * Find a combination that can fit into max_bw and does not
 633	 * exceed the maximum rate and lanes supported by the DP OUT and
 634	 * DP IN adapters.
 635	 */
 636	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
 637		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
 638			continue;
 639
 640		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
 641			continue;
 642
 643		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
 644			*new_rate = dp_bw[i][0];
 645			*new_lanes = dp_bw[i][1];
 646			return 0;
 647		}
 648	}
 649
 650	return -ENOSR;
 651}
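
/*
 * For example, with max_bw of 10000 Mb/s, a DP IN capable of HBR3 x4
 * and a DP OUT capable of HBR2 x4, the rows above { 2700, 4 } either
 * exceed the DP OUT capability or do not fit in max_bw, so the
 * function picks 2700 Mb/s x4 = 8640 Mb/s.
 */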
 652
 653static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
 654{
 655	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
 656	struct tb_port *out = tunnel->dst_port;
 657	struct tb_port *in = tunnel->src_port;
 658	int ret, max_bw;
 659
 660	/*
 661	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
 662	 * newer generation hardware.
 663	 */
 664	if (in->sw->generation < 2 || out->sw->generation < 2)
 665		return 0;
 666
 667	/*
 668	 * Perform connection manager handshake between IN and OUT ports
 669	 * before capabilities exchange can take place.
 670	 */
 671	ret = tb_dp_cm_handshake(in, out, 3000);
 672	if (ret)
 673		return ret;
 674
 675	/* Read both DP_LOCAL_CAP registers */
 676	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
 677			   in->cap_adap + DP_LOCAL_CAP, 1);
 678	if (ret)
 679		return ret;
 680
 681	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
 682			   out->cap_adap + DP_LOCAL_CAP, 1);
 683	if (ret)
 684		return ret;
 685
 686	/* Write IN local caps to OUT remote caps */
 687	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
 688			    out->cap_adap + DP_REMOTE_CAP, 1);
 689	if (ret)
 690		return ret;
 691
 692	in_rate = tb_dp_cap_get_rate(in_dp_cap);
 693	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
 694	tb_tunnel_dbg(tunnel,
 695		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 696		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
 697
 698	/*
 699	 * If the tunnel bandwidth is limited (max_bw is set) then see
 700	 * if we need to reduce bandwidth to fit there.
 701	 */
 702	out_rate = tb_dp_cap_get_rate(out_dp_cap);
 703	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
 704	bw = tb_dp_bandwidth(out_rate, out_lanes);
 705	tb_tunnel_dbg(tunnel,
 706		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 707		      out_rate, out_lanes, bw);
 708
 709	if (tb_port_path_direction_downstream(in, out))
 710		max_bw = tunnel->max_down;
 711	else
 712		max_bw = tunnel->max_up;
 713
 714	if (max_bw && bw > max_bw) {
 715		u32 new_rate, new_lanes, new_bw;
 716
 717		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
 718					     out_rate, out_lanes, &new_rate,
 719					     &new_lanes);
 720		if (ret) {
 721			tb_tunnel_info(tunnel, "not enough bandwidth\n");
 722			return ret;
 723		}
 724
 725		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
 726		tb_tunnel_dbg(tunnel,
 727			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
 728			      new_rate, new_lanes, new_bw);
 729
 730		/*
 731		 * Set new rate and number of lanes before writing it to
 732		 * the IN port remote caps.
 733		 */
 734		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
 735		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
 736	}
 737
 738	/*
 739	 * Titan Ridge does not disable AUX timers when it gets
 740	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
 741	 * DP tunneling.
 742	 */
 743	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
 744		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
 745		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
 746	}
 747
 748	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
 749			     in->cap_adap + DP_REMOTE_CAP, 1);
 750}
 751
 752static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
 753{
 754	int ret, estimated_bw, granularity, tmp;
 755	struct tb_port *out = tunnel->dst_port;
 756	struct tb_port *in = tunnel->src_port;
 757	u32 out_dp_cap, out_rate, out_lanes;
 758	u32 in_dp_cap, in_rate, in_lanes;
 759	u32 rate, lanes;
 760
 761	if (!bw_alloc_mode)
 762		return 0;
 763
 764	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
 765	if (ret)
 766		return ret;
 767
 768	ret = usb4_dp_port_set_group_id(in, in->group->index);
 769	if (ret)
 770		return ret;
 771
 772	/*
 773	 * Get the non-reduced rate and lanes based on the lowest
 774	 * capability of both adapters.
 775	 */
 776	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
 777			   in->cap_adap + DP_LOCAL_CAP, 1);
 778	if (ret)
 779		return ret;
 780
 781	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
 782			   out->cap_adap + DP_LOCAL_CAP, 1);
 783	if (ret)
 784		return ret;
 785
 786	in_rate = tb_dp_cap_get_rate(in_dp_cap);
 787	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
 788	out_rate = tb_dp_cap_get_rate(out_dp_cap);
 789	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
 790
 791	rate = min(in_rate, out_rate);
 792	lanes = min(in_lanes, out_lanes);
 793	tmp = tb_dp_bandwidth(rate, lanes);
 794
 795	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
 796		      rate, lanes, tmp);
 797
 798	ret = usb4_dp_port_set_nrd(in, rate, lanes);
 799	if (ret)
 800		return ret;
 801
 802	/*
  803	 * Pick a granularity that supports the maximum possible bandwidth.
 804	 * For that we use the UHBR rates too.
 805	 */
 806	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
 807	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
 808	rate = min(in_rate, out_rate);
 809	tmp = tb_dp_bandwidth(rate, lanes);
 810
 811	tb_tunnel_dbg(tunnel,
 812		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
 813		      rate, lanes, tmp);
 814
 815	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
 816	     granularity *= 2)
 817		;
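	/*
	 * For example, if the maximum is 77575 Mb/s: 77575 / 250 = 310
	 * does not fit in 8 bits, but 77575 / 500 = 155 does, so the
	 * granularity becomes 500 Mb/s.
	 */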
 818
 819	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
 820
 821	/*
  822	 * usb4_dp_port_set_granularity() returns -EINVAL if the
  823	 * granularity above is outside the accepted range.
 824	 */
 825	ret = usb4_dp_port_set_granularity(in, granularity);
 826	if (ret)
 827		return ret;
 828
 829	/*
 830	 * Bandwidth estimation is pretty much what we have in
 831	 * max_up/down fields. For discovery we just read what the
 832	 * estimation was set to.
 833	 */
 834	if (tb_port_path_direction_downstream(in, out))
 835		estimated_bw = tunnel->max_down;
 836	else
 837		estimated_bw = tunnel->max_up;
 838
 839	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
 840
 841	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
 842	if (ret)
 843		return ret;
 844
  845	/* Initial allocation should be 0 according to the spec */
 846	ret = usb4_dp_port_allocate_bandwidth(in, 0);
 847	if (ret)
 848		return ret;
 849
 850	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
 851	return 0;
 852}
 853
 854static int tb_dp_init(struct tb_tunnel *tunnel)
 855{
 856	struct tb_port *in = tunnel->src_port;
 857	struct tb_switch *sw = in->sw;
 858	struct tb *tb = in->sw->tb;
 859	int ret;
 860
 861	ret = tb_dp_xchg_caps(tunnel);
 862	if (ret)
 863		return ret;
 864
 865	if (!tb_switch_is_usb4(sw))
 866		return 0;
 867
 868	if (!usb4_dp_port_bandwidth_mode_supported(in))
 869		return 0;
 870
 871	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
 872
 873	ret = usb4_dp_port_set_cm_id(in, tb->index);
 874	if (ret)
 875		return ret;
 876
 877	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
 878}
 879
 880static void tb_dp_deinit(struct tb_tunnel *tunnel)
 881{
 882	struct tb_port *in = tunnel->src_port;
 883
 884	if (!usb4_dp_port_bandwidth_mode_supported(in))
 885		return;
 886	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
 887		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
 888		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
 889	}
 890}
 891
 892static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 893{
 894	int ret;
 895
 896	if (active) {
 897		struct tb_path **paths;
 898		int last;
 899
 900		paths = tunnel->paths;
 901		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
 902
 903		tb_dp_port_set_hops(tunnel->src_port,
 904			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
 905			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
 906			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
 907
 908		tb_dp_port_set_hops(tunnel->dst_port,
 909			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
 910			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
 911			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
 912	} else {
 913		tb_dp_port_hpd_clear(tunnel->src_port);
 914		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
 915		if (tb_port_is_dpout(tunnel->dst_port))
 916			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
 917	}
 918
 919	ret = tb_dp_port_enable(tunnel->src_port, active);
 920	if (ret)
 921		return ret;
 922
 923	if (tb_port_is_dpout(tunnel->dst_port))
 924		return tb_dp_port_enable(tunnel->dst_port, active);
 925
 926	return 0;
 927}
 928
  929/* max_bw is rounded up to the next granularity */
 930static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
 931						  int *max_bw)
 932{
 933	struct tb_port *in = tunnel->src_port;
 934	int ret, rate, lanes, nrd_bw;
 935	u32 cap;
 936
 937	/*
  938	 * The DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
  939	 * read parameter values, so we can use it to determine the
  940	 * maximum possible bandwidth over this link.
 941	 *
 942	 * See USB4 v2 spec 1.0 10.4.4.5.
 943	 */
 944	ret = tb_port_read(in, &cap, TB_CFG_PORT,
 945			   in->cap_adap + DP_LOCAL_CAP, 1);
 946	if (ret)
 947		return ret;
 948
 949	rate = tb_dp_cap_get_rate_ext(cap);
 950	if (tb_dp_is_uhbr_rate(rate)) {
 951		/*
 952		 * When UHBR is used there is no reduction in lanes so
 953		 * we can use this directly.
 954		 */
 955		lanes = tb_dp_cap_get_lanes(cap);
 956	} else {
 957		/*
  958		 * If UHBR is not supported then check the
  959		 * non-reduced rate and lanes.
 960		 */
 961		ret = usb4_dp_port_nrd(in, &rate, &lanes);
 962		if (ret)
 963			return ret;
 964	}
 965
 966	nrd_bw = tb_dp_bandwidth(rate, lanes);
 967
 968	if (max_bw) {
 969		ret = usb4_dp_port_granularity(in);
 970		if (ret < 0)
 971			return ret;
 972		*max_bw = roundup(nrd_bw, ret);
 973	}
 974
 975	return nrd_bw;
 976}
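
/*
 * For example, with a non-reduced rate of HBR3 x4 (25920 Mb/s) and a
 * granularity of 500 Mb/s, this returns 25920 and stores
 * roundup(25920, 500) = 26000 Mb/s in @max_bw.
 */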
 977
 978static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 979						   int *consumed_up,
 980						   int *consumed_down)
 981{
 982	struct tb_port *out = tunnel->dst_port;
 983	struct tb_port *in = tunnel->src_port;
 984	int ret, allocated_bw, max_bw;
 985
 986	if (!usb4_dp_port_bandwidth_mode_enabled(in))
 987		return -EOPNOTSUPP;
 988
 989	if (!tunnel->bw_mode)
 990		return -EOPNOTSUPP;
 991
 992	/* Read what was allocated previously if any */
 993	ret = usb4_dp_port_allocated_bandwidth(in);
 994	if (ret < 0)
 995		return ret;
 996	allocated_bw = ret;
 997
 998	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
 999	if (ret < 0)
1000		return ret;
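	/*
	 * An allocation equal to the rounded-up maximum means all of the
	 * bandwidth, so report the real non-reduced bandwidth instead.
	 */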
1001	if (allocated_bw == max_bw)
1002		allocated_bw = ret;
1003
1004	if (tb_port_path_direction_downstream(in, out)) {
1005		*consumed_up = 0;
1006		*consumed_down = allocated_bw;
1007	} else {
1008		*consumed_up = allocated_bw;
1009		*consumed_down = 0;
1010	}
1011
1012	return 0;
1013}
1014
1015static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
1016				     int *allocated_down)
1017{
1018	struct tb_port *out = tunnel->dst_port;
1019	struct tb_port *in = tunnel->src_port;
1020
1021	/*
1022	 * If we have already set the allocated bandwidth then use that.
1023	 * Otherwise we read it from the DPRX.
1024	 */
1025	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1026		int ret, allocated_bw, max_bw;
1027
1028		ret = usb4_dp_port_allocated_bandwidth(in);
1029		if (ret < 0)
1030			return ret;
1031		allocated_bw = ret;
1032
1033		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
1034		if (ret < 0)
1035			return ret;
1036		if (allocated_bw == max_bw)
1037			allocated_bw = ret;
1038
1039		if (tb_port_path_direction_downstream(in, out)) {
1040			*allocated_up = 0;
1041			*allocated_down = allocated_bw;
1042		} else {
1043			*allocated_up = allocated_bw;
1044			*allocated_down = 0;
1045		}
1046		return 0;
1047	}
1048
1049	return tunnel->consumed_bandwidth(tunnel, allocated_up,
1050					  allocated_down);
1051}
1052
1053static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1054				 int *alloc_down)
1055{
1056	struct tb_port *out = tunnel->dst_port;
1057	struct tb_port *in = tunnel->src_port;
1058	int max_bw, ret, tmp;
1059
1060	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1061		return -EOPNOTSUPP;
1062
1063	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
1064	if (ret < 0)
1065		return ret;
1066
1067	if (tb_port_path_direction_downstream(in, out)) {
1068		tmp = min(*alloc_down, max_bw);
1069		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1070		if (ret)
1071			return ret;
1072		*alloc_down = tmp;
1073		*alloc_up = 0;
1074	} else {
1075		tmp = min(*alloc_up, max_bw);
1076		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1077		if (ret)
1078			return ret;
1079		*alloc_down = 0;
1080		*alloc_up = tmp;
1081	}
1082
1083	/* Now we can use BW mode registers to figure out the bandwidth */
1084	/* TODO: need to handle discovery too */
1085	tunnel->bw_mode = true;
1086	return 0;
1087}
1088
1089static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
1090{
1091	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1092	struct tb_port *in = tunnel->src_port;
1093
1094	/*
 1095	 * Wait for DPRX done. Normally it should already be set for an
1096	 * active tunnel.
1097	 */
1098	do {
1099		u32 val;
1100		int ret;
1101
1102		ret = tb_port_read(in, &val, TB_CFG_PORT,
1103				   in->cap_adap + DP_COMMON_CAP, 1);
1104		if (ret)
1105			return ret;
1106
1107		if (val & DP_COMMON_CAP_DPRX_DONE) {
1108			tb_tunnel_dbg(tunnel, "DPRX read done\n");
1109			return 0;
1110		}
1111		usleep_range(100, 150);
1112	} while (ktime_before(ktime_get(), timeout));
1113
1114	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
1115	return -ETIMEDOUT;
1116}
1117
1118/* Read cap from tunnel DP IN */
1119static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1120			  u32 *lanes)
1121{
1122	struct tb_port *in = tunnel->src_port;
1123	u32 val;
1124	int ret;
1125
1126	switch (cap) {
1127	case DP_LOCAL_CAP:
1128	case DP_REMOTE_CAP:
1129	case DP_COMMON_CAP:
1130		break;
1131
1132	default:
1133		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1134		return -EINVAL;
1135	}
1136
1137	/*
1138	 * Read from the copied remote cap so that we take into account
 1139	 * whether capabilities were reduced during the exchange.
1140	 */
1141	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1142	if (ret)
1143		return ret;
1144
1145	*rate = tb_dp_cap_get_rate(val);
1146	*lanes = tb_dp_cap_get_lanes(val);
1147	return 0;
1148}
1149
1150static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1151				   int *max_down)
1152{
1153	struct tb_port *in = tunnel->src_port;
1154	int ret;
1155
1156	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1157		return -EOPNOTSUPP;
1158
1159	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1160	if (ret < 0)
1161		return ret;
1162
1163	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
1164		*max_up = 0;
1165		*max_down = ret;
1166	} else {
1167		*max_up = ret;
1168		*max_down = 0;
1169	}
1170
1171	return 0;
1172}
1173
1174static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1175				    int *consumed_down)
1176{
1177	struct tb_port *in = tunnel->src_port;
1178	const struct tb_switch *sw = in->sw;
1179	u32 rate = 0, lanes = 0;
1180	int ret;
1181
1182	if (tb_dp_is_usb4(sw)) {
1183		/*
1184		 * On USB4 routers check if the bandwidth allocation
1185		 * mode is enabled first and then read the bandwidth
1186		 * through those registers.
1187		 */
1188		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1189							      consumed_down);
1190		if (ret < 0) {
1191			if (ret != -EOPNOTSUPP)
1192				return ret;
1193		} else if (!ret) {
1194			return 0;
1195		}
1196		/*
1197		 * Then see if the DPRX negotiation is ready and if yes
1198		 * return that bandwidth (it may be smaller than the
1199		 * reduced one). Otherwise return the remote (possibly
1200		 * reduced) caps.
1201		 */
1202		ret = tb_dp_wait_dprx(tunnel, 150);
1203		if (ret) {
1204			if (ret == -ETIMEDOUT)
1205				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
1206						     &rate, &lanes);
1207			if (ret)
1208				return ret;
1209		}
1210		ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
1211		if (ret)
1212			return ret;
1213	} else if (sw->generation >= 2) {
1214		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1215		if (ret)
1216			return ret;
1217	} else {
 1218		/* No bandwidth management for legacy devices */
1219		*consumed_up = 0;
1220		*consumed_down = 0;
1221		return 0;
1222	}
1223
1224	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
1225		*consumed_up = 0;
1226		*consumed_down = tb_dp_bandwidth(rate, lanes);
1227	} else {
1228		*consumed_up = tb_dp_bandwidth(rate, lanes);
1229		*consumed_down = 0;
1230	}
1231
1232	return 0;
1233}
1234
1235static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1236{
1237	struct tb_port *port = hop->in_port;
1238	struct tb_switch *sw = port->sw;
1239
1240	if (tb_port_use_credit_allocation(port))
1241		hop->initial_credits = sw->min_dp_aux_credits;
1242	else
1243		hop->initial_credits = 1;
1244}
1245
1246static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
1247{
1248	struct tb_path_hop *hop;
1249
1250	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1251	path->egress_shared_buffer = TB_PATH_NONE;
1252	path->ingress_fc_enable = TB_PATH_ALL;
1253	path->ingress_shared_buffer = TB_PATH_NONE;
1254	path->priority = TB_DP_AUX_PRIORITY;
1255	path->weight = TB_DP_AUX_WEIGHT;
1256
1257	tb_path_for_each_hop(path, hop) {
1258		tb_dp_init_aux_credits(hop);
1259		if (pm_support)
1260			tb_init_pm_support(hop);
1261	}
1262}
1263
1264static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1265{
1266	struct tb_port *port = hop->in_port;
1267	struct tb_switch *sw = port->sw;
1268
1269	if (tb_port_use_credit_allocation(port)) {
1270		unsigned int nfc_credits;
1271		size_t max_dp_streams;
1272
1273		tb_available_credits(port, &max_dp_streams);
1274		/*
1275		 * Read the number of currently allocated NFC credits
1276		 * from the lane adapter. Since we only use them for DP
1277		 * tunneling we can use that to figure out how many DP
1278		 * tunnels already go through the lane adapter.
1279		 */
1280		nfc_credits = port->config.nfc_credits &
1281				ADP_CS_4_NFC_BUFFERS_MASK;
1282		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1283			return -ENOSPC;
1284
1285		hop->nfc_credits = sw->min_dp_main_credits;
1286	} else {
1287		hop->nfc_credits = min(port->total_credits - 2, 12U);
1288	}
1289
1290	return 0;
1291}
1292
1293static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
1294{
1295	struct tb_path_hop *hop;
1296
1297	path->egress_fc_enable = TB_PATH_NONE;
1298	path->egress_shared_buffer = TB_PATH_NONE;
1299	path->ingress_fc_enable = TB_PATH_NONE;
1300	path->ingress_shared_buffer = TB_PATH_NONE;
1301	path->priority = TB_DP_VIDEO_PRIORITY;
1302	path->weight = TB_DP_VIDEO_WEIGHT;
1303
1304	tb_path_for_each_hop(path, hop) {
1305		int ret;
1306
1307		ret = tb_dp_init_video_credits(hop);
1308		if (ret)
1309			return ret;
1310		if (pm_support)
1311			tb_init_pm_support(hop);
1312	}
1313
1314	return 0;
1315}
1316
1317static void tb_dp_dump(struct tb_tunnel *tunnel)
1318{
1319	struct tb_port *in, *out;
1320	u32 dp_cap, rate, lanes;
1321
1322	in = tunnel->src_port;
1323	out = tunnel->dst_port;
1324
1325	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1326			 in->cap_adap + DP_LOCAL_CAP, 1))
1327		return;
1328
1329	rate = tb_dp_cap_get_rate(dp_cap);
1330	lanes = tb_dp_cap_get_lanes(dp_cap);
1331
1332	tb_tunnel_dbg(tunnel,
1333		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1334		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1335
1336	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1337			 out->cap_adap + DP_LOCAL_CAP, 1))
1338		return;
1339
1340	rate = tb_dp_cap_get_rate(dp_cap);
1341	lanes = tb_dp_cap_get_lanes(dp_cap);
1342
1343	tb_tunnel_dbg(tunnel,
1344		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1345		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1346
1347	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1348			 in->cap_adap + DP_REMOTE_CAP, 1))
1349		return;
1350
1351	rate = tb_dp_cap_get_rate(dp_cap);
1352	lanes = tb_dp_cap_get_lanes(dp_cap);
1353
1354	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1355		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1356}
1357
1358/**
1359 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1360 * @tb: Pointer to the domain structure
1361 * @in: DP in adapter
1362 * @alloc_hopid: Allocate HopIDs from visited ports
1363 *
1364 * If @in adapter is active, follows the tunnel to the DP out adapter
1365 * and back. Returns the discovered tunnel or %NULL if there was no
1366 * tunnel.
1367 *
1368 * Return: DP tunnel or %NULL if no tunnel found.
1369 */
1370struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1371					bool alloc_hopid)
1372{
1373	struct tb_tunnel *tunnel;
1374	struct tb_port *port;
1375	struct tb_path *path;
1376
1377	if (!tb_dp_port_is_enabled(in))
1378		return NULL;
1379
1380	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1381	if (!tunnel)
1382		return NULL;
1383
1384	tunnel->init = tb_dp_init;
1385	tunnel->deinit = tb_dp_deinit;
1386	tunnel->activate = tb_dp_activate;
1387	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1388	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1389	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1390	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1391	tunnel->src_port = in;
1392
1393	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1394				&tunnel->dst_port, "Video", alloc_hopid);
1395	if (!path) {
1396		/* Just disable the DP IN port */
1397		tb_dp_port_enable(in, false);
1398		goto err_free;
1399	}
1400	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1401	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
1402		goto err_free;
1403
1404	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1405				alloc_hopid);
1406	if (!path)
1407		goto err_deactivate;
1408	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1409	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
1410
1411	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1412				&port, "AUX RX", alloc_hopid);
1413	if (!path)
1414		goto err_deactivate;
1415	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1416	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
1417
1418	/* Validate that the tunnel is complete */
1419	if (!tb_port_is_dpout(tunnel->dst_port)) {
1420		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1421		goto err_deactivate;
1422	}
1423
1424	if (!tb_dp_port_is_enabled(tunnel->dst_port))
1425		goto err_deactivate;
1426
1427	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1428		goto err_deactivate;
1429
1430	if (port != tunnel->src_port) {
1431		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1432		goto err_deactivate;
1433	}
1434
1435	tb_dp_dump(tunnel);
1436
1437	tb_tunnel_dbg(tunnel, "discovered\n");
1438	return tunnel;
1439
1440err_deactivate:
1441	tb_tunnel_deactivate(tunnel);
1442err_free:
1443	tb_tunnel_free(tunnel);
1444
1445	return NULL;
1446}
1447
1448/**
1449 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1450 * @tb: Pointer to the domain structure
1451 * @in: DP in adapter port
1452 * @out: DP out adapter port
1453 * @link_nr: Preferred lane adapter when the link is not bonded
1454 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
1455 *	    if not limited)
1456 * @max_down: Maximum available downstream bandwidth for the DP tunnel
1457 *	      (%0 if not limited)
1458 *
1459 * Allocates a tunnel between @in and @out that is capable of tunneling
1460 * Display Port traffic.
1461 *
1462 * Return: Returns a tb_tunnel on success or NULL on failure.
1463 */
1464struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1465				     struct tb_port *out, int link_nr,
1466				     int max_up, int max_down)
1467{
1468	struct tb_tunnel *tunnel;
1469	struct tb_path **paths;
1470	struct tb_path *path;
1471	bool pm_support;
1472
1473	if (WARN_ON(!in->cap_adap || !out->cap_adap))
1474		return NULL;
1475
1476	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1477	if (!tunnel)
1478		return NULL;
1479
1480	tunnel->init = tb_dp_init;
1481	tunnel->deinit = tb_dp_deinit;
1482	tunnel->activate = tb_dp_activate;
1483	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1484	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1485	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1486	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1487	tunnel->src_port = in;
1488	tunnel->dst_port = out;
1489	tunnel->max_up = max_up;
1490	tunnel->max_down = max_down;
1491
1492	paths = tunnel->paths;
1493	pm_support = usb4_switch_version(in->sw) >= 2;
1494
1495	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1496			     link_nr, "Video");
1497	if (!path)
1498		goto err_free;
1499	tb_dp_init_video_path(path, pm_support);
1500	paths[TB_DP_VIDEO_PATH_OUT] = path;
1501
1502	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1503			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1504	if (!path)
1505		goto err_free;
1506	tb_dp_init_aux_path(path, pm_support);
1507	paths[TB_DP_AUX_PATH_OUT] = path;
1508
1509	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1510			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1511	if (!path)
1512		goto err_free;
1513	tb_dp_init_aux_path(path, pm_support);
1514	paths[TB_DP_AUX_PATH_IN] = path;
1515
1516	return tunnel;
1517
1518err_free:
1519	tb_tunnel_free(tunnel);
1520	return NULL;
1521}
1522
1523static unsigned int tb_dma_available_credits(const struct tb_port *port)
1524{
1525	const struct tb_switch *sw = port->sw;
1526	int credits;
1527
1528	credits = tb_available_credits(port, NULL);
1529	if (tb_acpi_may_tunnel_pcie())
1530		credits -= sw->max_pcie_credits;
1531	credits -= port->dma_credits;
1532
1533	return credits > 0 ? credits : 0;
1534}
1535
1536static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1537{
1538	struct tb_port *port = hop->in_port;
1539
1540	if (tb_port_use_credit_allocation(port)) {
1541		unsigned int available = tb_dma_available_credits(port);
1542
1543		/*
1544		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1545		 * DMA path cannot be established.
1546		 */
1547		if (available < TB_MIN_DMA_CREDITS)
1548			return -ENOSPC;
1549
1550		while (credits > available)
1551			credits--;
1552
1553		tb_port_dbg(port, "reserving %u credits for DMA path\n",
1554			    credits);
1555
1556		port->dma_credits += credits;
1557	} else {
1558		if (tb_port_is_null(port))
1559			credits = port->bonded ? 14 : 6;
1560		else
1561			credits = min(port->total_credits, credits);
1562	}
1563
1564	hop->initial_credits = credits;
1565	return 0;
1566}
1567
1568/* Path from lane adapter to NHI */
1569static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1570{
1571	struct tb_path_hop *hop;
1572	unsigned int i, tmp;
1573
1574	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1575	path->ingress_fc_enable = TB_PATH_ALL;
1576	path->egress_shared_buffer = TB_PATH_NONE;
1577	path->ingress_shared_buffer = TB_PATH_NONE;
1578	path->priority = TB_DMA_PRIORITY;
1579	path->weight = TB_DMA_WEIGHT;
1580	path->clear_fc = true;
1581
1582	/*
1583	 * First lane adapter is the one connected to the remote host.
 1584	 * We don't tunnel other traffic over this link so we can use all
1585	 * the credits (except the ones reserved for control traffic).
1586	 */
1587	hop = &path->hops[0];
1588	tmp = min(tb_usable_credits(hop->in_port), credits);
1589	hop->initial_credits = tmp;
1590	hop->in_port->dma_credits += tmp;
1591
1592	for (i = 1; i < path->path_length; i++) {
1593		int ret;
1594
1595		ret = tb_dma_reserve_credits(&path->hops[i], credits);
1596		if (ret)
1597			return ret;
1598	}
1599
1600	return 0;
1601}
1602
1603/* Path from NHI to lane adapter */
1604static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1605{
1606	struct tb_path_hop *hop;
1607
1608	path->egress_fc_enable = TB_PATH_ALL;
1609	path->ingress_fc_enable = TB_PATH_ALL;
1610	path->egress_shared_buffer = TB_PATH_NONE;
1611	path->ingress_shared_buffer = TB_PATH_NONE;
1612	path->priority = TB_DMA_PRIORITY;
1613	path->weight = TB_DMA_WEIGHT;
1614	path->clear_fc = true;
1615
1616	tb_path_for_each_hop(path, hop) {
1617		int ret;
1618
1619		ret = tb_dma_reserve_credits(hop, credits);
1620		if (ret)
1621			return ret;
1622	}
1623
1624	return 0;
1625}
1626
1627static void tb_dma_release_credits(struct tb_path_hop *hop)
1628{
1629	struct tb_port *port = hop->in_port;
1630
1631	if (tb_port_use_credit_allocation(port)) {
1632		port->dma_credits -= hop->initial_credits;
1633
1634		tb_port_dbg(port, "released %u DMA path credits\n",
1635			    hop->initial_credits);
1636	}
1637}
1638
1639static void tb_dma_deinit_path(struct tb_path *path)
1640{
1641	struct tb_path_hop *hop;
1642
1643	tb_path_for_each_hop(path, hop)
1644		tb_dma_release_credits(hop);
1645}
1646
1647static void tb_dma_deinit(struct tb_tunnel *tunnel)
1648{
1649	int i;
1650
1651	for (i = 0; i < tunnel->npaths; i++) {
1652		if (!tunnel->paths[i])
1653			continue;
1654		tb_dma_deinit_path(tunnel->paths[i]);
1655	}
1656}
1657
1658/**
1659 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1660 * @tb: Pointer to the domain structure
1661 * @nhi: Host controller port
1662 * @dst: Destination null port which the other domain is connected to
1663 * @transmit_path: HopID used for transmitting packets
1664 * @transmit_ring: NHI ring number used to send packets towards the
1665 *		   other domain. Set to %-1 if TX path is not needed.
1666 * @receive_path: HopID used for receiving packets
1667 * @receive_ring: NHI ring number used to receive packets from the
1668 *		  other domain. Set to %-1 if RX path is not needed.
1669 *
1670 * Return: Returns a tb_tunnel on success or NULL on failure.
1671 */
1672struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1673				      struct tb_port *dst, int transmit_path,
1674				      int transmit_ring, int receive_path,
1675				      int receive_ring)
1676{
1677	struct tb_tunnel *tunnel;
1678	size_t npaths = 0, i = 0;
1679	struct tb_path *path;
1680	int credits;
1681
1682	/* Ring 0 is reserved for control channel */
1683	if (WARN_ON(!receive_ring || !transmit_ring))
1684		return NULL;
1685
1686	if (receive_ring > 0)
1687		npaths++;
1688	if (transmit_ring > 0)
1689		npaths++;
1690
1691	if (WARN_ON(!npaths))
1692		return NULL;
1693
1694	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1695	if (!tunnel)
1696		return NULL;
1697
1698	tunnel->src_port = nhi;
1699	tunnel->dst_port = dst;
1700	tunnel->deinit = tb_dma_deinit;
1701
1702	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1703
1704	if (receive_ring > 0) {
1705		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1706				     "DMA RX");
1707		if (!path)
1708			goto err_free;
1709		tunnel->paths[i++] = path;
1710		if (tb_dma_init_rx_path(path, credits)) {
1711			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1712			goto err_free;
1713		}
1714	}
1715
1716	if (transmit_ring > 0) {
1717		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1718				     "DMA TX");
1719		if (!path)
1720			goto err_free;
1721		tunnel->paths[i++] = path;
1722		if (tb_dma_init_tx_path(path, credits)) {
1723			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1724			goto err_free;
1725		}
1726	}
1727
1728	return tunnel;
1729
1730err_free:
1731	tb_tunnel_free(tunnel);
1732	return NULL;
1733}
1734
1735/**
1736 * tb_tunnel_match_dma() - Match DMA tunnel
1737 * @tunnel: Tunnel to match
1738 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1739 * @transmit_ring: NHI ring number used to send packets towards the
1740 *		   other domain. Pass %-1 to ignore.
1741 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1742 * @receive_ring: NHI ring number used to receive packets from the
1743 *		  other domain. Pass %-1 to ignore.
1744 *
 1745 * This function can be used to match a specific DMA tunnel, if there
 1746 * are multiple DMA tunnels going through the same XDomain connection.
 1747 * Returns true if there is a match and false otherwise.
1748 */
1749bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1750			 int transmit_ring, int receive_path, int receive_ring)
1751{
1752	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1753	int i;
1754
1755	if (!receive_ring || !transmit_ring)
1756		return false;
1757
1758	for (i = 0; i < tunnel->npaths; i++) {
1759		const struct tb_path *path = tunnel->paths[i];
1760
1761		if (!path)
1762			continue;
1763
1764		if (tb_port_is_nhi(path->hops[0].in_port))
1765			tx_path = path;
1766		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1767			rx_path = path;
1768	}
1769
1770	if (transmit_ring > 0 || transmit_path > 0) {
1771		if (!tx_path)
1772			return false;
1773		if (transmit_ring > 0 &&
1774		    (tx_path->hops[0].in_hop_index != transmit_ring))
1775			return false;
1776		if (transmit_path > 0 &&
1777		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1778			return false;
1779	}
1780
1781	if (receive_ring > 0 || receive_path > 0) {
1782		if (!rx_path)
1783			return false;
1784		if (receive_path > 0 &&
1785		    (rx_path->hops[0].in_hop_index != receive_path))
1786			return false;
1787		if (receive_ring > 0 &&
1788		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1789			return false;
1790	}
1791
1792	return true;
1793}
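
/*
 * For instance, a hypothetical caller tearing down only the DMA tunnel
 * that uses TX ring 1 could walk its tunnel list and check:
 *
 *   if (tb_tunnel_is_dma(tunnel) &&
 *       tb_tunnel_match_dma(tunnel, -1, 1, -1, -1))
 *           ...
 */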
1794
1795static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1796{
1797	int ret, up_max_rate, down_max_rate;
1798
1799	ret = usb4_usb3_port_max_link_rate(up);
1800	if (ret < 0)
1801		return ret;
1802	up_max_rate = ret;
1803
1804	ret = usb4_usb3_port_max_link_rate(down);
1805	if (ret < 0)
1806		return ret;
1807	down_max_rate = ret;
1808
1809	return min(up_max_rate, down_max_rate);
1810}
1811
1812static int tb_usb3_init(struct tb_tunnel *tunnel)
1813{
1814	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1815		      tunnel->allocated_up, tunnel->allocated_down);
1816
1817	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1818						 &tunnel->allocated_up,
1819						 &tunnel->allocated_down);
1820}
1821
1822static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1823{
1824	int res;
1825
1826	res = tb_usb3_port_enable(tunnel->src_port, activate);
1827	if (res)
1828		return res;
1829
1830	if (tb_port_is_usb3_up(tunnel->dst_port))
1831		return tb_usb3_port_enable(tunnel->dst_port, activate);
1832
1833	return 0;
1834}
1835
1836static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1837		int *consumed_up, int *consumed_down)
1838{
1839	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
1840	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
1841
1842	/*
1843	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
 1844	 * take it into account here.
1845	 */
1846	*consumed_up = tunnel->allocated_up *
1847		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1848	*consumed_down = tunnel->allocated_down *
1849		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1850
1851	if (tb_port_get_link_generation(port) >= 4) {
1852		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
1853		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
1854	}
1855
1856	return 0;
1857}
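
/*
 * For example, with 4000 Mb/s allocated in each direction and PCIe
 * tunneling enabled, the weights give 4000 * (2 + 1) / 2 = 6000 Mb/s
 * consumed per direction, which is already above the 3000 Mb/s USB4 v2
 * minimum.
 */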
1858
1859static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1860{
1861	int ret;
1862
1863	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1864					       &tunnel->allocated_up,
1865					       &tunnel->allocated_down);
1866	if (ret)
1867		return ret;
1868
1869	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1870		      tunnel->allocated_up, tunnel->allocated_down);
1871	return 0;
1872}
1873
1874static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1875						int *available_up,
1876						int *available_down)
1877{
1878	int ret, max_rate, allocate_up, allocate_down;
1879
1880	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
1881	if (ret < 0) {
1882		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
1883		return;
1884	}
1885
1886	/*
1887	 * 90% of the max rate can be allocated for isochronous
1888	 * transfers.
1889	 */
1890	max_rate = ret * 90 / 100;
1891
1892	/* No need to reclaim if already at maximum */
1893	if (tunnel->allocated_up >= max_rate &&
1894	    tunnel->allocated_down >= max_rate)
1895		return;
1896
1897	/* Don't go lower than what is already allocated */
1898	allocate_up = min(max_rate, *available_up);
1899	if (allocate_up < tunnel->allocated_up)
1900		allocate_up = tunnel->allocated_up;
1901
1902	allocate_down = min(max_rate, *available_down);
1903	if (allocate_down < tunnel->allocated_down)
1904		allocate_down = tunnel->allocated_down;
1905
1906	/* If no changes no need to do more */
1907	if (allocate_up == tunnel->allocated_up &&
1908	    allocate_down == tunnel->allocated_down)
1909		return;
1910
1911	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
1912						&allocate_down);
1913	if (ret) {
1914		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
1915		return;
1916	}
1917
1918	tunnel->allocated_up = allocate_up;
1919	*available_up -= tunnel->allocated_up;
1920
1921	tunnel->allocated_down = allocate_down;
1922	*available_down -= tunnel->allocated_down;
1923
1924	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1925		      tunnel->allocated_up, tunnel->allocated_down);
1926}
1927
1928static void tb_usb3_init_credits(struct tb_path_hop *hop)
1929{
1930	struct tb_port *port = hop->in_port;
1931	struct tb_switch *sw = port->sw;
1932	unsigned int credits;
1933
1934	if (tb_port_use_credit_allocation(port)) {
1935		credits = sw->max_usb3_credits;
1936	} else {
1937		if (tb_port_is_null(port))
1938			credits = port->bonded ? 32 : 16;
1939		else
1940			credits = 7;
1941	}
1942
1943	hop->initial_credits = credits;
1944}
1945
1946static void tb_usb3_init_path(struct tb_path *path)
1947{
1948	struct tb_path_hop *hop;
1949
1950	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1951	path->egress_shared_buffer = TB_PATH_NONE;
1952	path->ingress_fc_enable = TB_PATH_ALL;
1953	path->ingress_shared_buffer = TB_PATH_NONE;
1954	path->priority = TB_USB3_PRIORITY;
1955	path->weight = TB_USB3_WEIGHT;
1956	path->drop_packages = 0;
1957
1958	tb_path_for_each_hop(path, hop)
1959		tb_usb3_init_credits(hop);
1960}
1961
1962/**
1963 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1964 * @tb: Pointer to the domain structure
1965 * @down: USB3 downstream adapter
1966 * @alloc_hopid: Allocate HopIDs from visited ports
1967 *
1968 * If @down adapter is active, follows the tunnel to the USB3 upstream
1969 * adapter and back. Returns the discovered tunnel or %NULL if there was
1970 * no tunnel.
1971 */
1972struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
1973					  bool alloc_hopid)
1974{
1975	struct tb_tunnel *tunnel;
1976	struct tb_path *path;
1977
1978	if (!tb_usb3_port_is_enabled(down))
1979		return NULL;
1980
1981	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1982	if (!tunnel)
1983		return NULL;
1984
1985	tunnel->activate = tb_usb3_activate;
1986	tunnel->src_port = down;
1987
1988	/*
1989	 * Discover both paths even if they are not complete. We will
1990	 * clean them up by calling tb_tunnel_deactivate() below in that
1991	 * case.
1992	 */
1993	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1994				&tunnel->dst_port, "USB3 Down", alloc_hopid);
1995	if (!path) {
1996		/* Just disable the downstream port */
1997		tb_usb3_port_enable(down, false);
1998		goto err_free;
1999	}
2000	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2001	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
2002
2003	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
2004				"USB3 Up", alloc_hopid);
2005	if (!path)
2006		goto err_deactivate;
2007	tunnel->paths[TB_USB3_PATH_UP] = path;
2008	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
2009
2010	/* Validate that the tunnel is complete */
2011	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
2012		tb_port_warn(tunnel->dst_port,
2013			     "path does not end on a USB3 adapter, cleaning up\n");
2014		goto err_deactivate;
2015	}
2016
2017	if (down != tunnel->src_port) {
2018		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2019		goto err_deactivate;
2020	}
2021
2022	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2023		tb_tunnel_warn(tunnel,
2024			       "tunnel is not fully activated, cleaning up\n");
2025		goto err_deactivate;
2026	}
2027
2028	if (!tb_route(down->sw)) {
2029		int ret;
2030
2031		/*
2032		 * Read the initial bandwidth allocation for the first
2033		 * hop tunnel.
2034		 */
2035		ret = usb4_usb3_port_allocated_bandwidth(down,
2036			&tunnel->allocated_up, &tunnel->allocated_down);
2037		if (ret)
2038			goto err_deactivate;
2039
2040		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2041			      tunnel->allocated_up, tunnel->allocated_down);
2042
2043		tunnel->init = tb_usb3_init;
2044		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2045		tunnel->release_unused_bandwidth =
2046			tb_usb3_release_unused_bandwidth;
2047		tunnel->reclaim_available_bandwidth =
2048			tb_usb3_reclaim_available_bandwidth;
2049	}
2050
2051	tb_tunnel_dbg(tunnel, "discovered\n");
2052	return tunnel;
2053
2054err_deactivate:
2055	tb_tunnel_deactivate(tunnel);
2056err_free:
2057	tb_tunnel_free(tunnel);
2058
2059	return NULL;
2060}
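
/*
 * Editor's usage sketch, hypothetical and not part of the driver: how a
 * connection manager might adopt a USB3 tunnel set up by boot firmware.
 * Assumes @tb and @down are valid.
 */
static struct tb_tunnel *example_adopt_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;

	/* Follows the active paths; returns NULL if nothing is there */
	tunnel = tb_tunnel_discover_usb3(tb, down, true);
	if (tunnel)
		tb_tunnel_dbg(tunnel, "adopted firmware-created tunnel\n");
	return tunnel;
}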
2061
2062/**
2063 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2064 * @tb: Pointer to the domain structure
2065 * @up: USB3 upstream adapter port
2066 * @down: USB3 downstream adapter port
2067 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
2068 *	    if not limited).
2069 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
2070 *	      (%0 if not limited).
2071 *
2072 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
2073 * @TB_TYPE_USB3_DOWN.
2074 *
2075 * Return: Returns a tb_tunnel on success or %NULL on failure.
2076 */
2077struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2078				       struct tb_port *down, int max_up,
2079				       int max_down)
2080{
2081	struct tb_tunnel *tunnel;
2082	struct tb_path *path;
2083	int max_rate = 0;
2084
2085	/*
2086	 * Check that we have enough bandwidth available for the new
2087	 * USB3 tunnel.
2088	 */
2089	if (max_up > 0 || max_down > 0) {
2090		max_rate = tb_usb3_max_link_rate(down, up);
2091		if (max_rate < 0)
2092			return NULL;
2093
2094		/* Only 90% can be allocated for USB3 isochronous transfers */
2095		max_rate = max_rate * 90 / 100;
2096		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
2097			    max_rate);
2098
2099		if (max_rate > max_up || max_rate > max_down) {
2100			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
2101			return NULL;
2102		}
2103	}
2104
2105	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2106	if (!tunnel)
2107		return NULL;
2108
2109	tunnel->activate = tb_usb3_activate;
2110	tunnel->src_port = down;
2111	tunnel->dst_port = up;
2112	tunnel->max_up = max_up;
2113	tunnel->max_down = max_down;
2114
2115	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2116			     "USB3 Down");
2117	if (!path) {
2118		tb_tunnel_free(tunnel);
2119		return NULL;
2120	}
2121	tb_usb3_init_path(path);
2122	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2123
2124	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2125			     "USB3 Up");
2126	if (!path) {
2127		tb_tunnel_free(tunnel);
2128		return NULL;
2129	}
2130	tb_usb3_init_path(path);
2131	tunnel->paths[TB_USB3_PATH_UP] = path;
2132
2133	if (!tb_route(down->sw)) {
2134		tunnel->allocated_up = max_rate;
2135		tunnel->allocated_down = max_rate;
2136
2137		tunnel->init = tb_usb3_init;
2138		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2139		tunnel->release_unused_bandwidth =
2140			tb_usb3_release_unused_bandwidth;
2141		tunnel->reclaim_available_bandwidth =
2142			tb_usb3_reclaim_available_bandwidth;
2143	}
2144
2145	return tunnel;
2146}
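
/*
 * Editor's usage sketch, hypothetical: allocate and bring up a USB3
 * tunnel, tearing it down again on failure. tb_tunnel_activate() and
 * tb_tunnel_free() are defined later in this file.
 */
static struct tb_tunnel *example_usb3_up(struct tb *tb, struct tb_port *up,
					 struct tb_port *down)
{
	struct tb_tunnel *tunnel;

	/* Passing 0/0 means the available bandwidth is not limited */
	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 0, 0);
	if (!tunnel)
		return NULL;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	return tunnel;
}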
2147
2148/**
2149 * tb_tunnel_free() - free a tunnel
2150 * @tunnel: Tunnel to be freed
2151 *
2152 * Frees a tunnel. The tunnel does not need to be deactivated.
2153 */
2154void tb_tunnel_free(struct tb_tunnel *tunnel)
2155{
2156	int i;
2157
2158	if (!tunnel)
2159		return;
2160
2161	if (tunnel->deinit)
2162		tunnel->deinit(tunnel);
2163
2164	for (i = 0; i < tunnel->npaths; i++) {
2165		if (tunnel->paths[i])
2166			tb_path_free(tunnel->paths[i]);
2167	}
2168
2169	kfree(tunnel->paths);
2170	kfree(tunnel);
2171}
2172
2173/**
2174 * tb_tunnel_is_invalid() - check whether an activated path is still valid
2175 * @tunnel: Tunnel to check
2176 */
2177bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2178{
2179	int i;
2180
2181	for (i = 0; i < tunnel->npaths; i++) {
2182		WARN_ON(!tunnel->paths[i]->activated);
2183		if (tb_path_is_invalid(tunnel->paths[i]))
2184			return true;
2185	}
2186
2187	return false;
2188}
2189
2190/**
2191 * tb_tunnel_restart() - activate a tunnel after a hardware reset
2192 * @tunnel: Tunnel to restart
2193 *
2194 * Return: 0 on success and negative errno in case of failure
2195 */
2196int tb_tunnel_restart(struct tb_tunnel *tunnel)
2197{
2198	int res, i;
2199
2200	tb_tunnel_dbg(tunnel, "activating\n");
2201
2202	/*
2203	 * Make sure all paths are properly disabled before enabling
2204	 * them again.
2205	 */
2206	for (i = 0; i < tunnel->npaths; i++) {
2207		if (tunnel->paths[i]->activated) {
2208			tb_path_deactivate(tunnel->paths[i]);
2209			tunnel->paths[i]->activated = false;
2210		}
2211	}
2212
2213	if (tunnel->init) {
2214		res = tunnel->init(tunnel);
2215		if (res)
2216			return res;
2217	}
2218
2219	for (i = 0; i < tunnel->npaths; i++) {
2220		res = tb_path_activate(tunnel->paths[i]);
2221		if (res)
2222			goto err;
2223	}
2224
2225	if (tunnel->activate) {
2226		res = tunnel->activate(tunnel, true);
2227		if (res)
2228			goto err;
2229	}
2230
2231	return 0;
2232
2233err:
2234	tb_tunnel_warn(tunnel, "activation failed\n");
2235	tb_tunnel_deactivate(tunnel);
2236	return res;
2237}
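
/*
 * Editor's usage sketch, hypothetical: after a reset or resume the hop
 * configuration is gone from the hardware, so a connection manager can
 * walk its tunnel list and re-program each tunnel.
 */
static void example_restart_all(struct list_head *tunnels)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnels, list)
		if (tb_tunnel_restart(tunnel))
			tb_tunnel_warn(tunnel, "restart failed\n");
}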
2238
2239/**
2240 * tb_tunnel_activate() - activate a tunnel
2241 * @tunnel: Tunnel to activate
2242 *
2243 * Return: Returns 0 on success or an error code on failure.
2244 */
2245int tb_tunnel_activate(struct tb_tunnel *tunnel)
2246{
2247	int i;
2248
2249	for (i = 0; i < tunnel->npaths; i++) {
2250		if (tunnel->paths[i]->activated) {
2251			tb_tunnel_WARN(tunnel,
2252				       "trying to activate an already activated tunnel\n");
2253			return -EINVAL;
2254		}
2255	}
2256
2257	return tb_tunnel_restart(tunnel);
2258}
2259
2260/**
2261 * tb_tunnel_deactivate() - deactivate a tunnel
2262 * @tunnel: Tunnel to deactivate
2263 */
2264void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2265{
2266	int i;
2267
2268	tb_tunnel_dbg(tunnel, "deactivating\n");
2269
2270	if (tunnel->activate)
2271		tunnel->activate(tunnel, false);
2272
2273	for (i = 0; i < tunnel->npaths; i++) {
2274		if (tunnel->paths[i] && tunnel->paths[i]->activated)
2275			tb_path_deactivate(tunnel->paths[i]);
2276	}
2277}
2278
2279/**
2280 * tb_tunnel_port_on_path() - Does the tunnel go through port
2281 * @tunnel: Tunnel to check
2282 * @port: Port to check
2283 *
2284 * Returns true if @tunnel goes through @port (direction does not matter),
2285 * false otherwise.
2286 */
2287bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2288			    const struct tb_port *port)
2289{
2290	int i;
2291
2292	for (i = 0; i < tunnel->npaths; i++) {
2293		if (!tunnel->paths[i])
2294			continue;
2295
2296		if (tb_path_port_on_path(tunnel->paths[i], port))
2297			return true;
2298	}
2299
2300	return false;
2301}
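
/*
 * Editor's sketch, hypothetical: tb_tunnel_port_on_path() makes it easy
 * to count how many tunnels from a list cross a given lane adapter,
 * e.g. before deciding whether a port may be disconnected. Assumes the
 * tunnels are linked through tunnel->list.
 */
static int example_tunnels_through_port(struct list_head *tunnels,
					const struct tb_port *port)
{
	struct tb_tunnel *tunnel;
	int n = 0;

	list_for_each_entry(tunnel, tunnels, list)
		if (tb_tunnel_port_on_path(tunnel, port))
			n++;

	return n;
}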
2302
2303static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
2304{
2305	int i;
2306
2307	for (i = 0; i < tunnel->npaths; i++) {
2308		if (!tunnel->paths[i])
2309			return false;
2310		if (!tunnel->paths[i]->activated)
2311			return false;
2312	}
2313
2314	return true;
2315}
2316
2317/**
2318 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2319 * @tunnel: Tunnel to check
2320 * @max_up: Maximum upstream bandwidth in Mb/s
2321 * @max_down: Maximum downstream bandwidth in Mb/s
2322 *
2323 * Returns the maximum possible bandwidth this tunnel can use if not
2324 * limited by other bandwidth clients. If the tunnel does not support
2325 * this, returns %-EOPNOTSUPP.
2326 */
2327int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2328				int *max_down)
2329{
2330	if (!tb_tunnel_is_active(tunnel))
2331		return -EINVAL;
2332
2333	if (tunnel->maximum_bandwidth)
2334		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2335	return -EOPNOTSUPP;
2336}
2337
2338/**
2339 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2340 * @tunnel: Tunnel to check
2341 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2342 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2343 *		    stored here
2344 *
2345 * Returns the bandwidth allocated for the tunnel. This may be higher
2346 * than what the tunnel actually consumes.
2347 */
2348int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2349				  int *allocated_down)
2350{
2351	if (!tb_tunnel_is_active(tunnel))
2352		return -EINVAL;
2353
2354	if (tunnel->allocated_bandwidth)
2355		return tunnel->allocated_bandwidth(tunnel, allocated_up,
2356						   allocated_down);
2357	return -EOPNOTSUPP;
2358}
2359
2360/**
2361 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2362 * @tunnel: Tunnel whose bandwidth allocation to change
2363 * @alloc_up: New upstream bandwidth in Mb/s
2364 * @alloc_down: New downstream bandwidth in Mb/s
2365 *
2366 * Tries to change tunnel bandwidth allocation. If it succeeds, returns
2367 * %0 and updates @alloc_up and @alloc_down to what was actually
2368 * allocated (which may not be the same as passed originally). Returns
2369 * negative errno in case of failure.
2370 */
2371int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2372			      int *alloc_down)
2373{
2374	if (!tb_tunnel_is_active(tunnel))
2375		return -EINVAL;
2376
2377	if (tunnel->alloc_bandwidth)
2378		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2379
2380	return -EOPNOTSUPP;
2381}
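
/*
 * Editor's sketch, hypothetical: requesting more downstream bandwidth.
 * The values are passed by reference because the tunnel may round them,
 * so the caller reads back what was actually granted.
 */
static int example_request_more_down(struct tb_tunnel *tunnel, int extra)
{
	int up, down, ret;

	ret = tb_tunnel_allocated_bandwidth(tunnel, &up, &down);
	if (ret)
		return ret;

	down += extra;
	ret = tb_tunnel_alloc_bandwidth(tunnel, &up, &down);
	if (ret)
		return ret;

	/* @up and @down now hold the granted allocation */
	return 0;
}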
2382
2383/**
2384 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2385 * @tunnel: Tunnel to check
2386 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2387 *		 Can be %NULL.
2388 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2389 *		   Can be %NULL.
2390 *
2391 * Stores the amount of isochronous bandwidth @tunnel consumes in
2392 * @consumed_up and @consumed_down. In case of success returns %0,
2393 * negative errno otherwise.
2394 */
2395int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2396				 int *consumed_down)
2397{
2398	int up_bw = 0, down_bw = 0;
2399
2400	if (!tb_tunnel_is_active(tunnel))
2401		goto out;
2402
2403	if (tunnel->consumed_bandwidth) {
2404		int ret;
2405
2406		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2407		if (ret)
2408			return ret;
2409
2410		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
2411			      down_bw);
2412	}
2413
2414out:
2415	if (consumed_up)
2416		*consumed_up = up_bw;
2417	if (consumed_down)
2418		*consumed_down = down_bw;
2419
2420	return 0;
2421}
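
/*
 * Editor's sketch, hypothetical: both output pointers are optional, so
 * a caller interested only in one direction can pass %NULL for the other.
 */
static int example_consumed_down(struct tb_tunnel *tunnel, int *down)
{
	return tb_tunnel_consumed_bandwidth(tunnel, NULL, down);
}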
2422
2423/**
2424 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2425 * @tunnel: Tunnel whose unused bandwidth to release
2426 *
2427 * If the tunnel supports dynamic bandwidth management (USB3 tunnels
2428 * at the moment), this function makes it release all the unused bandwidth.
2429 *
2430 * Returns %0 in case of success and negative errno otherwise.
2431 */
2432int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2433{
2434	if (!tb_tunnel_is_active(tunnel))
2435		return 0;
2436
2437	if (tunnel->release_unused_bandwidth) {
2438		int ret;
2439
2440		ret = tunnel->release_unused_bandwidth(tunnel);
2441		if (ret)
2442			return ret;
2443	}
2444
2445	return 0;
2446}
2447
2448/**
2449 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2450 * @tunnel: Tunnel reclaiming available bandwidth
2451 * @available_up: Available upstream bandwidth (in Mb/s)
2452 * @available_down: Available downstream bandwidth (in Mb/s)
2453 *
2454 * Reclaims bandwidth from @available_up and @available_down and updates
2455 * the variables accordingly (e.g. decreases both according to what was
2456 * reclaimed by the tunnel). If nothing was reclaimed, the values are
2457 * kept as is.
2458 */
2459void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2460					   int *available_up,
2461					   int *available_down)
2462{
2463	if (!tb_tunnel_is_active(tunnel))
2464		return;
2465
2466	if (tunnel->reclaim_available_bandwidth)
2467		tunnel->reclaim_available_bandwidth(tunnel, available_up,
2468						    available_down);
2469}
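
/*
 * Editor's sketch, hypothetical: the release/reclaim pair is meant to
 * bracket a bandwidth recalculation, roughly as the connection manager
 * does: first drop unused allocations, then hand the freed budget back
 * out to the tunnels that can use it.
 */
static void example_rebalance(struct list_head *tunnels, int available_up,
			      int available_down)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnels, list)
		tb_tunnel_release_unused_bandwidth(tunnel);

	/* available_up/available_down shrink as each tunnel reclaims */
	list_for_each_entry(tunnel, tunnels, list)
		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
						      &available_down);
}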
2470
2471const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2472{
2473	return tb_tunnel_names[tunnel->type];
2474}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - Tunneling support
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2019, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/slab.h>
  11#include <linux/list.h>
  12#include <linux/ktime.h>
  13#include <linux/string_helpers.h>
  14
  15#include "tunnel.h"
  16#include "tb.h"
  17
  18/* PCIe adapters use always HopID of 8 for both directions */
  19#define TB_PCI_HOPID			8
  20
  21#define TB_PCI_PATH_DOWN		0
  22#define TB_PCI_PATH_UP			1
  23
  24#define TB_PCI_PRIORITY			3
  25#define TB_PCI_WEIGHT			1
  26
  27/* USB3 adapters use always HopID of 8 for both directions */
  28#define TB_USB3_HOPID			8
  29
  30#define TB_USB3_PATH_DOWN		0
  31#define TB_USB3_PATH_UP			1
  32
  33#define TB_USB3_PRIORITY		3
  34#define TB_USB3_WEIGHT			2
  35
  36/* DP adapters use HopID 8 for AUX and 9 for Video */
  37#define TB_DP_AUX_TX_HOPID		8
  38#define TB_DP_AUX_RX_HOPID		8
  39#define TB_DP_VIDEO_HOPID		9
  40
  41#define TB_DP_VIDEO_PATH_OUT		0
  42#define TB_DP_AUX_PATH_OUT		1
  43#define TB_DP_AUX_PATH_IN		2
  44
  45#define TB_DP_VIDEO_PRIORITY		1
  46#define TB_DP_VIDEO_WEIGHT		1
  47
  48#define TB_DP_AUX_PRIORITY		2
  49#define TB_DP_AUX_WEIGHT		1
  50
  51/* Minimum number of credits needed for PCIe path */
  52#define TB_MIN_PCIE_CREDITS		6U
  53/*
  54 * Number of credits we try to allocate for each DMA path if not limited
  55 * by the host router baMaxHI.
  56 */
  57#define TB_DMA_CREDITS			14
  58/* Minimum number of credits for DMA path */
  59#define TB_MIN_DMA_CREDITS		1
  60
  61#define TB_DMA_PRIORITY			5
  62#define TB_DMA_WEIGHT			1
  63
  64/*
  65 * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
  66 * according to USB4 v2 Connection Manager guide. This ends up reserving
  67 * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into
  68 * account.
  69 */
  70#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
  71#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
  72
  73static unsigned int dma_credits = TB_DMA_CREDITS;
  74module_param(dma_credits, uint, 0444);
  75MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
  76                __MODULE_STRING(TB_DMA_CREDITS) ")");
  77
  78static bool bw_alloc_mode = true;
  79module_param(bw_alloc_mode, bool, 0444);
  80MODULE_PARM_DESC(bw_alloc_mode,
  81		 "enable bandwidth allocation mode if supported (default: true)");
  82
  83static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
  84
  85static inline unsigned int tb_usable_credits(const struct tb_port *port)
  86{
  87	return port->total_credits - port->ctl_credits;
  88}
  89
  90/**
  91 * tb_available_credits() - Available credits for PCIe and DMA
  92 * @port: Lane adapter to check
  93 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
  94 *		    streams possible through this lane adapter
  95 */
  96static unsigned int tb_available_credits(const struct tb_port *port,
  97					 size_t *max_dp_streams)
  98{
  99	const struct tb_switch *sw = port->sw;
 100	int credits, usb3, pcie, spare;
 101	size_t ndp;
 102
 103	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
 104	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
 105
 106	if (tb_acpi_is_xdomain_allowed()) {
 107		spare = min_not_zero(sw->max_dma_credits, dma_credits);
 108		/* Add some credits for potential second DMA tunnel */
 109		spare += TB_MIN_DMA_CREDITS;
 110	} else {
 111		spare = 0;
 112	}
 113
 114	credits = tb_usable_credits(port);
 115	if (tb_acpi_may_tunnel_dp()) {
 116		/*
 117		 * Maximum number of DP streams possible through the
 118		 * lane adapter.
 119		 */
 120		if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
 121			ndp = (credits - (usb3 + pcie + spare)) /
 122			      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
 123		else
 124			ndp = 0;
 125	} else {
 126		ndp = 0;
 127	}
 128	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
 129	credits -= usb3;
 130
 131	if (max_dp_streams)
 132		*max_dp_streams = ndp;
 133
 134	return credits > 0 ? credits : 0;
 135}
 136
 137static void tb_init_pm_support(struct tb_path_hop *hop)
 138{
 139	struct tb_port *out_port = hop->out_port;
 140	struct tb_port *in_port = hop->in_port;
 141
 142	if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
 143	    usb4_switch_version(in_port->sw) >= 2)
 144		hop->pm_support = true;
 145}
 146
 147static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
 148					 enum tb_tunnel_type type)
 149{
 150	struct tb_tunnel *tunnel;
 151
 152	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
 153	if (!tunnel)
 154		return NULL;
 155
 156	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
 157	if (!tunnel->paths) {
 158		tb_tunnel_free(tunnel);
 159		return NULL;
 160	}
 161
 162	INIT_LIST_HEAD(&tunnel->list);
 163	tunnel->tb = tb;
 164	tunnel->npaths = npaths;
 165	tunnel->type = type;
 166
 167	return tunnel;
 168}
 169
 170static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
 171{
 172	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
 173	int ret;
 174
 175	/* Only supported of both routers are at least USB4 v2 */
 176	if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
 177	   (usb4_switch_version(tunnel->dst_port->sw) < 2))
 178		return 0;
 179
 180	if (enable && tb_port_get_link_generation(port) < 4)
 181		return 0;
 182
 183	ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
 184	if (ret)
 185		return ret;
 186
 187	/*
 188	 * Downstream router could be unplugged so disable of encapsulation
 189	 * in upstream router is still possible.
 190	 */
 191	ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
 192	if (ret) {
 193		if (enable)
 194			return ret;
 195		if (ret != -ENODEV)
 196			return ret;
 197	}
 198
 199	tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
 200		      str_enabled_disabled(enable));
 201	return 0;
 202}
 203
 204static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
 205{
 206	int res;
 207
 208	if (activate) {
 209		res = tb_pci_set_ext_encapsulation(tunnel, activate);
 210		if (res)
 211			return res;
 212	}
 213
 214	if (activate)
 215		res = tb_pci_port_enable(tunnel->dst_port, activate);
 216	else
 217		res = tb_pci_port_enable(tunnel->src_port, activate);
 218	if (res)
 219		return res;
 220
 221
 222	if (activate) {
 223		res = tb_pci_port_enable(tunnel->src_port, activate);
 224		if (res)
 225			return res;
 226	} else {
 227		/* Downstream router could be unplugged */
 228		tb_pci_port_enable(tunnel->dst_port, activate);
 229	}
 230
 231	return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
 232}
 233
 234static int tb_pci_init_credits(struct tb_path_hop *hop)
 235{
 236	struct tb_port *port = hop->in_port;
 237	struct tb_switch *sw = port->sw;
 238	unsigned int credits;
 239
 240	if (tb_port_use_credit_allocation(port)) {
 241		unsigned int available;
 242
 243		available = tb_available_credits(port, NULL);
 244		credits = min(sw->max_pcie_credits, available);
 245
 246		if (credits < TB_MIN_PCIE_CREDITS)
 247			return -ENOSPC;
 248
 249		credits = max(TB_MIN_PCIE_CREDITS, credits);
 250	} else {
 251		if (tb_port_is_null(port))
 252			credits = port->bonded ? 32 : 16;
 253		else
 254			credits = 7;
 255	}
 256
 257	hop->initial_credits = credits;
 258	return 0;
 259}
 260
 261static int tb_pci_init_path(struct tb_path *path)
 262{
 263	struct tb_path_hop *hop;
 264
 265	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 266	path->egress_shared_buffer = TB_PATH_NONE;
 267	path->ingress_fc_enable = TB_PATH_ALL;
 268	path->ingress_shared_buffer = TB_PATH_NONE;
 269	path->priority = TB_PCI_PRIORITY;
 270	path->weight = TB_PCI_WEIGHT;
 271	path->drop_packages = 0;
 272
 273	tb_path_for_each_hop(path, hop) {
 274		int ret;
 275
 276		ret = tb_pci_init_credits(hop);
 277		if (ret)
 278			return ret;
 279	}
 280
 281	return 0;
 282}
 283
 284/**
 285 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 286 * @tb: Pointer to the domain structure
 287 * @down: PCIe downstream adapter
 288 * @alloc_hopid: Allocate HopIDs from visited ports
 289 *
 290 * If @down adapter is active, follows the tunnel to the PCIe upstream
 291 * adapter and back. Returns the discovered tunnel or %NULL if there was
 292 * no tunnel.
 293 */
 294struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
 295					 bool alloc_hopid)
 296{
 297	struct tb_tunnel *tunnel;
 298	struct tb_path *path;
 299
 300	if (!tb_pci_port_is_enabled(down))
 301		return NULL;
 302
 303	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 304	if (!tunnel)
 305		return NULL;
 306
 307	tunnel->activate = tb_pci_activate;
 308	tunnel->src_port = down;
 309
 310	/*
 311	 * Discover both paths even if they are not complete. We will
 312	 * clean them up by calling tb_tunnel_deactivate() below in that
 313	 * case.
 314	 */
 315	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
 316				&tunnel->dst_port, "PCIe Up", alloc_hopid);
 317	if (!path) {
 318		/* Just disable the downstream port */
 319		tb_pci_port_enable(down, false);
 320		goto err_free;
 321	}
 322	tunnel->paths[TB_PCI_PATH_UP] = path;
 323	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
 324		goto err_free;
 325
 326	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
 327				"PCIe Down", alloc_hopid);
 328	if (!path)
 329		goto err_deactivate;
 330	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 331	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
 332		goto err_deactivate;
 333
 334	/* Validate that the tunnel is complete */
 335	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
 336		tb_port_warn(tunnel->dst_port,
 337			     "path does not end on a PCIe adapter, cleaning up\n");
 338		goto err_deactivate;
 339	}
 340
 341	if (down != tunnel->src_port) {
 342		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
 343		goto err_deactivate;
 344	}
 345
 346	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
 347		tb_tunnel_warn(tunnel,
 348			       "tunnel is not fully activated, cleaning up\n");
 349		goto err_deactivate;
 350	}
 351
 352	tb_tunnel_dbg(tunnel, "discovered\n");
 353	return tunnel;
 354
 355err_deactivate:
 356	tb_tunnel_deactivate(tunnel);
 357err_free:
 358	tb_tunnel_free(tunnel);
 359
 360	return NULL;
 361}
 362
 363/**
 364 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 365 * @tb: Pointer to the domain structure
 366 * @up: PCIe upstream adapter port
 367 * @down: PCIe downstream adapter port
 368 *
 369 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 370 * TB_TYPE_PCIE_DOWN.
 371 *
 372 * Return: Returns a tb_tunnel on success or NULL on failure.
 373 */
 374struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 375				      struct tb_port *down)
 376{
 377	struct tb_tunnel *tunnel;
 378	struct tb_path *path;
 379
 380	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 381	if (!tunnel)
 382		return NULL;
 383
 384	tunnel->activate = tb_pci_activate;
 385	tunnel->src_port = down;
 386	tunnel->dst_port = up;
 387
 388	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
 389			     "PCIe Down");
 390	if (!path)
 391		goto err_free;
 392	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 393	if (tb_pci_init_path(path))
 394		goto err_free;
 395
 396	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
 397			     "PCIe Up");
 398	if (!path)
 399		goto err_free;
 400	tunnel->paths[TB_PCI_PATH_UP] = path;
 401	if (tb_pci_init_path(path))
 402		goto err_free;
 403
 404	return tunnel;
 405
 406err_free:
 407	tb_tunnel_free(tunnel);
 408	return NULL;
 409}
 410
 411/**
 412 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
 413 * @port: Lane 0 adapter
 414 * @reserved_up: Upstream bandwidth in Mb/s to reserve
 415 * @reserved_down: Downstream bandwidth in Mb/s to reserve
 416 *
 417 * Can be called to any connected lane 0 adapter to find out how much
 418 * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
 419 * Returns true if there is something to be reserved and writes the
 420 * amount to @reserved_down/@reserved_up. Otherwise returns false and
 421 * does not touch the parameters.
 422 */
 423bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
 424			    int *reserved_down)
 425{
 426	if (WARN_ON_ONCE(!port->remote))
 427		return false;
 428
 429	if (!tb_acpi_may_tunnel_pcie())
 430		return false;
 431
 432	if (tb_port_get_link_generation(port) < 4)
 433		return false;
 434
 435	/* Must have PCIe adapters */
 436	if (tb_is_upstream_port(port)) {
 437		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
 438			return false;
 439		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
 440			return false;
 441	} else {
 442		if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
 443			return false;
 444		if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
 445			return false;
 446	}
 447
 448	*reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
 449	*reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
 450
 451	tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
 452		    *reserved_down);
 453	return true;
 454}
 455
 456static bool tb_dp_is_usb4(const struct tb_switch *sw)
 457{
 458	/* Titan Ridge DP adapters need the same treatment as USB4 */
 459	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
 460}
 461
 462static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
 463			      int timeout_msec)
 464{
 465	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
 466	u32 val;
 467	int ret;
 468
 469	/* Both ends need to support this */
 470	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
 471		return 0;
 472
 473	ret = tb_port_read(out, &val, TB_CFG_PORT,
 474			   out->cap_adap + DP_STATUS_CTRL, 1);
 475	if (ret)
 476		return ret;
 477
 478	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
 479
 480	ret = tb_port_write(out, &val, TB_CFG_PORT,
 481			    out->cap_adap + DP_STATUS_CTRL, 1);
 482	if (ret)
 483		return ret;
 484
 485	do {
 486		ret = tb_port_read(out, &val, TB_CFG_PORT,
 487				   out->cap_adap + DP_STATUS_CTRL, 1);
 488		if (ret)
 489			return ret;
 490		if (!(val & DP_STATUS_CTRL_CMHS))
 491			return 0;
 492		usleep_range(100, 150);
 493	} while (ktime_before(ktime_get(), timeout));
 494
 495	return -ETIMEDOUT;
 496}
 497
 498/*
 499 * Returns maximum possible rate from capability supporting only DP 2.0
 500 * and below. Used when DP BW allocation mode is not enabled.
 501 */
 502static inline u32 tb_dp_cap_get_rate(u32 val)
 503{
 504	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
 505
 506	switch (rate) {
 507	case DP_COMMON_CAP_RATE_RBR:
 508		return 1620;
 509	case DP_COMMON_CAP_RATE_HBR:
 510		return 2700;
 511	case DP_COMMON_CAP_RATE_HBR2:
 512		return 5400;
 513	case DP_COMMON_CAP_RATE_HBR3:
 514		return 8100;
 515	default:
 516		return 0;
 517	}
 518}
 519
 520/*
 521 * Returns maximum possible rate from capability supporting DP 2.1
 522 * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
 523 * mode is enabled.
 524 */
 525static inline u32 tb_dp_cap_get_rate_ext(u32 val)
 526{
 527	if (val & DP_COMMON_CAP_UHBR20)
 528		return 20000;
 529	else if (val & DP_COMMON_CAP_UHBR13_5)
 530		return 13500;
 531	else if (val & DP_COMMON_CAP_UHBR10)
 532		return 10000;
 533
 534	return tb_dp_cap_get_rate(val);
 535}
 536
 537static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
 538{
 539	return rate >= 10000;
 540}
 541
 542static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
 543{
 544	val &= ~DP_COMMON_CAP_RATE_MASK;
 545	switch (rate) {
 546	default:
 547		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
 548		fallthrough;
 549	case 1620:
 550		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
 551		break;
 552	case 2700:
 553		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
 554		break;
 555	case 5400:
 556		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
 557		break;
 558	case 8100:
 559		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
 560		break;
 561	}
 562	return val;
 563}
 564
 565static inline u32 tb_dp_cap_get_lanes(u32 val)
 566{
 567	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
 568
 569	switch (lanes) {
 570	case DP_COMMON_CAP_1_LANE:
 571		return 1;
 572	case DP_COMMON_CAP_2_LANES:
 573		return 2;
 574	case DP_COMMON_CAP_4_LANES:
 575		return 4;
 576	default:
 577		return 0;
 578	}
 579}
 580
 581static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
 582{
 583	val &= ~DP_COMMON_CAP_LANES_MASK;
 584	switch (lanes) {
 585	default:
 586		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
 587		     lanes);
 588		fallthrough;
 589	case 1:
 590		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
 591		break;
 592	case 2:
 593		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
 594		break;
 595	case 4:
 596		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
 597		break;
 598	}
 599	return val;
 600}
 601
 602static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
 603{
 604	/* Tunneling removes the DP 8b/10b 128/132b encoding */
 605	if (tb_dp_is_uhbr_rate(rate))
 606		return rate * lanes * 128 / 132;
 607	return rate * lanes * 8 / 10;
 608}
 609
 610static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
 611				  u32 out_rate, u32 out_lanes, u32 *new_rate,
 612				  u32 *new_lanes)
 613{
 614	static const u32 dp_bw[][2] = {
 615		/* Mb/s, lanes */
 616		{ 8100, 4 }, /* 25920 Mb/s */
 617		{ 5400, 4 }, /* 17280 Mb/s */
 618		{ 8100, 2 }, /* 12960 Mb/s */
 619		{ 2700, 4 }, /* 8640 Mb/s */
 620		{ 5400, 2 }, /* 8640 Mb/s */
 621		{ 8100, 1 }, /* 6480 Mb/s */
 622		{ 1620, 4 }, /* 5184 Mb/s */
 623		{ 5400, 1 }, /* 4320 Mb/s */
 624		{ 2700, 2 }, /* 4320 Mb/s */
 625		{ 1620, 2 }, /* 2592 Mb/s */
 626		{ 2700, 1 }, /* 2160 Mb/s */
 627		{ 1620, 1 }, /* 1296 Mb/s */
 628	};
 629	unsigned int i;
 630
 631	/*
 632	 * Find a combination that can fit into max_bw and does not
 633	 * exceed the maximum rate and lanes supported by the DP OUT and
 634	 * DP IN adapters.
 635	 */
 636	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
 637		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
 638			continue;
 639
 640		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
 641			continue;
 642
 643		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
 644			*new_rate = dp_bw[i][0];
 645			*new_lanes = dp_bw[i][1];
 646			return 0;
 647		}
 648	}
 649
 650	return -ENOSR;
 651}
 652
 653static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
 654{
 655	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
 656	struct tb_port *out = tunnel->dst_port;
 657	struct tb_port *in = tunnel->src_port;
 658	int ret, max_bw;
 659
 660	/*
 661	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
 662	 * newer generation hardware.
 663	 */
 664	if (in->sw->generation < 2 || out->sw->generation < 2)
 665		return 0;
 666
 667	/*
 668	 * Perform connection manager handshake between IN and OUT ports
 669	 * before capabilities exchange can take place.
 670	 */
 671	ret = tb_dp_cm_handshake(in, out, 3000);
 672	if (ret)
 673		return ret;
 674
 675	/* Read both DP_LOCAL_CAP registers */
 676	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
 677			   in->cap_adap + DP_LOCAL_CAP, 1);
 678	if (ret)
 679		return ret;
 680
 681	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
 682			   out->cap_adap + DP_LOCAL_CAP, 1);
 683	if (ret)
 684		return ret;
 685
 686	/* Write IN local caps to OUT remote caps */
 687	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
 688			    out->cap_adap + DP_REMOTE_CAP, 1);
 689	if (ret)
 690		return ret;
 691
 692	in_rate = tb_dp_cap_get_rate(in_dp_cap);
 693	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
 694	tb_tunnel_dbg(tunnel,
 695		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 696		      in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
 697
 698	/*
 699	 * If the tunnel bandwidth is limited (max_bw is set) then see
 700	 * if we need to reduce bandwidth to fit there.
 701	 */
 702	out_rate = tb_dp_cap_get_rate(out_dp_cap);
 703	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
 704	bw = tb_dp_bandwidth(out_rate, out_lanes);
 705	tb_tunnel_dbg(tunnel,
 706		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 707		      out_rate, out_lanes, bw);
 708
 709	if (tb_tunnel_direction_downstream(tunnel))
 710		max_bw = tunnel->max_down;
 711	else
 712		max_bw = tunnel->max_up;
 713
 714	if (max_bw && bw > max_bw) {
 715		u32 new_rate, new_lanes, new_bw;
 716
 717		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
 718					     out_rate, out_lanes, &new_rate,
 719					     &new_lanes);
 720		if (ret) {
 721			tb_tunnel_info(tunnel, "not enough bandwidth\n");
 722			return ret;
 723		}
 724
 725		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
 726		tb_tunnel_dbg(tunnel,
 727			      "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
 728			      new_rate, new_lanes, new_bw);
 729
 730		/*
 731		 * Set new rate and number of lanes before writing it to
 732		 * the IN port remote caps.
 733		 */
 734		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
 735		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
 736	}
 737
 738	/*
 739	 * Titan Ridge does not disable AUX timers when it gets
 740	 * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
 741	 * DP tunneling.
 742	 */
 743	if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
 744		out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
 745		tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
 746	}
 747
 748	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
 749			     in->cap_adap + DP_REMOTE_CAP, 1);
 750}
 751
 752static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
 753{
 754	int ret, estimated_bw, granularity, tmp;
 755	struct tb_port *out = tunnel->dst_port;
 756	struct tb_port *in = tunnel->src_port;
 757	u32 out_dp_cap, out_rate, out_lanes;
 758	u32 in_dp_cap, in_rate, in_lanes;
 759	u32 rate, lanes;
 760
 761	if (!bw_alloc_mode)
 762		return 0;
 763
 764	ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
 765	if (ret)
 766		return ret;
 767
 768	ret = usb4_dp_port_set_group_id(in, in->group->index);
 769	if (ret)
 770		return ret;
 771
 772	/*
 773	 * Get the non-reduced rate and lanes based on the lowest
 774	 * capability of both adapters.
 775	 */
 776	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
 777			   in->cap_adap + DP_LOCAL_CAP, 1);
 778	if (ret)
 779		return ret;
 780
 781	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
 782			   out->cap_adap + DP_LOCAL_CAP, 1);
 783	if (ret)
 784		return ret;
 785
 786	in_rate = tb_dp_cap_get_rate(in_dp_cap);
 787	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
 788	out_rate = tb_dp_cap_get_rate(out_dp_cap);
 789	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
 790
 791	rate = min(in_rate, out_rate);
 792	lanes = min(in_lanes, out_lanes);
 793	tmp = tb_dp_bandwidth(rate, lanes);
 794
 795	tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
 796		      rate, lanes, tmp);
 797
 798	ret = usb4_dp_port_set_nrd(in, rate, lanes);
 799	if (ret)
 800		return ret;
 801
 802	/*
 803	 * Pick up granularity that supports maximum possible bandwidth.
 804	 * For that we use the UHBR rates too.
 805	 */
 806	in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
 807	out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
 808	rate = min(in_rate, out_rate);
 809	tmp = tb_dp_bandwidth(rate, lanes);
 810
 811	tb_tunnel_dbg(tunnel,
 812		      "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
 813		      rate, lanes, tmp);
 814
 815	for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
 816	     granularity *= 2)
 817		;
 818
 819	tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
 820
 821	/*
 822	 * Returns -EINVAL if granularity above is outside of the
 823	 * accepted ranges.
 824	 */
 825	ret = usb4_dp_port_set_granularity(in, granularity);
 826	if (ret)
 827		return ret;
 828
 829	/*
 830	 * Bandwidth estimation is pretty much what we have in
 831	 * max_up/down fields. For discovery we just read what the
 832	 * estimation was set to.
 833	 */
 834	if (tb_tunnel_direction_downstream(tunnel))
 835		estimated_bw = tunnel->max_down;
 836	else
 837		estimated_bw = tunnel->max_up;
 838
 839	tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
 840
 841	ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
 842	if (ret)
 843		return ret;
 844
 845	/* Initial allocation should be 0 according the spec */
 846	ret = usb4_dp_port_allocate_bandwidth(in, 0);
 847	if (ret)
 848		return ret;
 849
 850	tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
 851	return 0;
 852}
 853
 854static int tb_dp_init(struct tb_tunnel *tunnel)
 855{
 856	struct tb_port *in = tunnel->src_port;
 857	struct tb_switch *sw = in->sw;
 858	struct tb *tb = in->sw->tb;
 859	int ret;
 860
 861	ret = tb_dp_xchg_caps(tunnel);
 862	if (ret)
 863		return ret;
 864
 865	if (!tb_switch_is_usb4(sw))
 866		return 0;
 867
 868	if (!usb4_dp_port_bandwidth_mode_supported(in))
 869		return 0;
 870
 871	tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
 872
 873	ret = usb4_dp_port_set_cm_id(in, tb->index);
 874	if (ret)
 875		return ret;
 876
 877	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
 878}
 879
 880static void tb_dp_deinit(struct tb_tunnel *tunnel)
 881{
 882	struct tb_port *in = tunnel->src_port;
 883
 884	if (!usb4_dp_port_bandwidth_mode_supported(in))
 885		return;
 886	if (usb4_dp_port_bandwidth_mode_enabled(in)) {
 887		usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
 888		tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
 889	}
 890}
 891
 892static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 893{
 894	int ret;
 895
 896	if (active) {
 897		struct tb_path **paths;
 898		int last;
 899
 900		paths = tunnel->paths;
 901		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
 902
 903		tb_dp_port_set_hops(tunnel->src_port,
 904			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
 905			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
 906			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
 907
 908		tb_dp_port_set_hops(tunnel->dst_port,
 909			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
 910			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
 911			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
 912	} else {
 913		tb_dp_port_hpd_clear(tunnel->src_port);
 914		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
 915		if (tb_port_is_dpout(tunnel->dst_port))
 916			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
 917	}
 918
 919	ret = tb_dp_port_enable(tunnel->src_port, active);
 920	if (ret)
 921		return ret;
 922
 923	if (tb_port_is_dpout(tunnel->dst_port))
 924		return tb_dp_port_enable(tunnel->dst_port, active);
 925
 926	return 0;
 927}
 928
 929/**
 930 * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
 931 * @tunnel: DP tunnel to check
 932 * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
 933 *
 934 * Returns maximum possible bandwidth for this tunnel in Mb/s.
 935 */
 936static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
 937						  int *max_bw_rounded)
 938{
 939	struct tb_port *in = tunnel->src_port;
 940	int ret, rate, lanes, max_bw;
 941	u32 cap;
 942
 943	/*
 944	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
 945	 * read parameter values so this so we can use this to determine
 946	 * the maximum possible bandwidth over this link.
 947	 *
 948	 * See USB4 v2 spec 1.0 10.4.4.5.
 949	 */
 950	ret = tb_port_read(in, &cap, TB_CFG_PORT,
 951			   in->cap_adap + DP_LOCAL_CAP, 1);
 952	if (ret)
 953		return ret;
 954
 955	rate = tb_dp_cap_get_rate_ext(cap);
 956	lanes = tb_dp_cap_get_lanes(cap);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 957
 958	max_bw = tb_dp_bandwidth(rate, lanes);
 959
 960	if (max_bw_rounded) {
 961		ret = usb4_dp_port_granularity(in);
 962		if (ret < 0)
 963			return ret;
 964		*max_bw_rounded = roundup(max_bw, ret);
 965	}
 966
 967	return max_bw;
 968}
 969
 970static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 971						   int *consumed_up,
 972						   int *consumed_down)
 973{
 
 974	struct tb_port *in = tunnel->src_port;
 975	int ret, allocated_bw, max_bw_rounded;
 976
 977	if (!usb4_dp_port_bandwidth_mode_enabled(in))
 978		return -EOPNOTSUPP;
 979
 980	if (!tunnel->bw_mode)
 981		return -EOPNOTSUPP;
 982
 983	/* Read what was allocated previously if any */
 984	ret = usb4_dp_port_allocated_bandwidth(in);
 985	if (ret < 0)
 986		return ret;
 987	allocated_bw = ret;
 988
 989	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
 990	if (ret < 0)
 991		return ret;
 992	if (allocated_bw == max_bw_rounded)
 993		allocated_bw = ret;
 994
 995	if (tb_tunnel_direction_downstream(tunnel)) {
 996		*consumed_up = 0;
 997		*consumed_down = allocated_bw;
 998	} else {
 999		*consumed_up = allocated_bw;
1000		*consumed_down = 0;
1001	}
1002
1003	return 0;
1004}
1005
1006static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
1007				     int *allocated_down)
1008{
 
1009	struct tb_port *in = tunnel->src_port;
1010
1011	/*
1012	 * If we have already set the allocated bandwidth then use that.
1013	 * Otherwise we read it from the DPRX.
1014	 */
1015	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
1016		int ret, allocated_bw, max_bw_rounded;
1017
1018		ret = usb4_dp_port_allocated_bandwidth(in);
1019		if (ret < 0)
1020			return ret;
1021		allocated_bw = ret;
1022
1023		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
1024							     &max_bw_rounded);
1025		if (ret < 0)
1026			return ret;
1027		if (allocated_bw == max_bw_rounded)
1028			allocated_bw = ret;
1029
1030		if (tb_tunnel_direction_downstream(tunnel)) {
1031			*allocated_up = 0;
1032			*allocated_down = allocated_bw;
1033		} else {
1034			*allocated_up = allocated_bw;
1035			*allocated_down = 0;
1036		}
1037		return 0;
1038	}
1039
1040	return tunnel->consumed_bandwidth(tunnel, allocated_up,
1041					  allocated_down);
1042}
1043
1044static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
1045				 int *alloc_down)
1046{
 
1047	struct tb_port *in = tunnel->src_port;
1048	int max_bw_rounded, ret, tmp;
1049
1050	if (!usb4_dp_port_bandwidth_mode_enabled(in))
1051		return -EOPNOTSUPP;
1052
1053	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
1054	if (ret < 0)
1055		return ret;
1056
1057	if (tb_tunnel_direction_downstream(tunnel)) {
1058		tmp = min(*alloc_down, max_bw_rounded);
1059		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1060		if (ret)
1061			return ret;
1062		*alloc_down = tmp;
1063		*alloc_up = 0;
1064	} else {
1065		tmp = min(*alloc_up, max_bw_rounded);
1066		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
1067		if (ret)
1068			return ret;
1069		*alloc_down = 0;
1070		*alloc_up = tmp;
1071	}
1072
1073	/* Now we can use BW mode registers to figure out the bandwidth */
1074	/* TODO: need to handle discovery too */
1075	tunnel->bw_mode = true;
1076	return 0;
1077}
1078
1079static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
1080{
1081	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1082	struct tb_port *in = tunnel->src_port;
1083
1084	/*
1085	 * Wait for DPRX done. Normally it should be already set for
1086	 * active tunnel.
1087	 */
1088	do {
1089		u32 val;
1090		int ret;
1091
1092		ret = tb_port_read(in, &val, TB_CFG_PORT,
1093				   in->cap_adap + DP_COMMON_CAP, 1);
1094		if (ret)
1095			return ret;
1096
1097		if (val & DP_COMMON_CAP_DPRX_DONE) {
1098			tb_tunnel_dbg(tunnel, "DPRX read done\n");
1099			return 0;
1100		}
1101		usleep_range(100, 150);
1102	} while (ktime_before(ktime_get(), timeout));
1103
1104	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
1105	return -ETIMEDOUT;
1106}
1107
1108/* Read cap from tunnel DP IN */
1109static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
1110			  u32 *lanes)
1111{
1112	struct tb_port *in = tunnel->src_port;
1113	u32 val;
1114	int ret;
1115
1116	switch (cap) {
1117	case DP_LOCAL_CAP:
1118	case DP_REMOTE_CAP:
1119	case DP_COMMON_CAP:
1120		break;
1121
1122	default:
1123		tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
1124		return -EINVAL;
1125	}
1126
1127	/*
1128	 * Read from the copied remote cap so that we take into account
1129	 * if capabilities were reduced during exchange.
1130	 */
1131	ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
1132	if (ret)
1133		return ret;
1134
1135	*rate = tb_dp_cap_get_rate(val);
1136	*lanes = tb_dp_cap_get_lanes(val);
1137	return 0;
1138}
1139
1140static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
1141				   int *max_down)
1142{
 
1143	int ret;
1144
1145	if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
1146		return -EOPNOTSUPP;
1147
1148	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
1149	if (ret < 0)
1150		return ret;
1151
1152	if (tb_tunnel_direction_downstream(tunnel)) {
1153		*max_up = 0;
1154		*max_down = ret;
1155	} else {
1156		*max_up = ret;
1157		*max_down = 0;
1158	}
1159
1160	return 0;
1161}
1162
1163static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1164				    int *consumed_down)
1165{
1166	const struct tb_switch *sw = tunnel->src_port->sw;
 
1167	u32 rate = 0, lanes = 0;
1168	int ret;
1169
1170	if (tb_dp_is_usb4(sw)) {
1171		/*
1172		 * On USB4 routers check if the bandwidth allocation
1173		 * mode is enabled first and then read the bandwidth
1174		 * through those registers.
1175		 */
1176		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
1177							      consumed_down);
1178		if (ret < 0) {
1179			if (ret != -EOPNOTSUPP)
1180				return ret;
1181		} else if (!ret) {
1182			return 0;
1183		}
1184		/*
1185		 * Then see if the DPRX negotiation is ready and if yes
1186		 * return that bandwidth (it may be smaller than the
1187		 * reduced one). According to VESA spec, the DPRX
1188		 * negotiation shall compete in 5 seconds after tunnel
1189		 * established. We give it 100ms extra just in case.
1190		 */
1191		ret = tb_dp_wait_dprx(tunnel, 5100);
1192		if (ret)
1193			return ret;
 
 
 
 
 
1194		ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
1195		if (ret)
1196			return ret;
1197	} else if (sw->generation >= 2) {
1198		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
1199		if (ret)
1200			return ret;
1201	} else {
1202		/* No bandwidth management for legacy devices  */
1203		*consumed_up = 0;
1204		*consumed_down = 0;
1205		return 0;
1206	}
1207
1208	if (tb_tunnel_direction_downstream(tunnel)) {
1209		*consumed_up = 0;
1210		*consumed_down = tb_dp_bandwidth(rate, lanes);
1211	} else {
1212		*consumed_up = tb_dp_bandwidth(rate, lanes);
1213		*consumed_down = 0;
1214	}
1215
1216	return 0;
1217}
1218
1219static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
1220{
1221	struct tb_port *port = hop->in_port;
1222	struct tb_switch *sw = port->sw;
1223
1224	if (tb_port_use_credit_allocation(port))
1225		hop->initial_credits = sw->min_dp_aux_credits;
1226	else
1227		hop->initial_credits = 1;
1228}
1229
1230static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
1231{
1232	struct tb_path_hop *hop;
1233
1234	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1235	path->egress_shared_buffer = TB_PATH_NONE;
1236	path->ingress_fc_enable = TB_PATH_ALL;
1237	path->ingress_shared_buffer = TB_PATH_NONE;
1238	path->priority = TB_DP_AUX_PRIORITY;
1239	path->weight = TB_DP_AUX_WEIGHT;
1240
1241	tb_path_for_each_hop(path, hop) {
1242		tb_dp_init_aux_credits(hop);
1243		if (pm_support)
1244			tb_init_pm_support(hop);
1245	}
1246}
1247
1248static int tb_dp_init_video_credits(struct tb_path_hop *hop)
1249{
1250	struct tb_port *port = hop->in_port;
1251	struct tb_switch *sw = port->sw;
1252
1253	if (tb_port_use_credit_allocation(port)) {
1254		unsigned int nfc_credits;
1255		size_t max_dp_streams;
1256
1257		tb_available_credits(port, &max_dp_streams);
1258		/*
1259		 * Read the number of currently allocated NFC credits
1260		 * from the lane adapter. Since we only use them for DP
1261		 * tunneling we can use that to figure out how many DP
1262		 * tunnels already go through the lane adapter.
1263		 */
1264		nfc_credits = port->config.nfc_credits &
1265				ADP_CS_4_NFC_BUFFERS_MASK;
1266		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
1267			return -ENOSPC;
1268
1269		hop->nfc_credits = sw->min_dp_main_credits;
1270	} else {
1271		hop->nfc_credits = min(port->total_credits - 2, 12U);
1272	}
1273
1274	return 0;
1275}
1276
1277static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
1278{
1279	struct tb_path_hop *hop;
1280
1281	path->egress_fc_enable = TB_PATH_NONE;
1282	path->egress_shared_buffer = TB_PATH_NONE;
1283	path->ingress_fc_enable = TB_PATH_NONE;
1284	path->ingress_shared_buffer = TB_PATH_NONE;
1285	path->priority = TB_DP_VIDEO_PRIORITY;
1286	path->weight = TB_DP_VIDEO_WEIGHT;
1287
1288	tb_path_for_each_hop(path, hop) {
1289		int ret;
1290
1291		ret = tb_dp_init_video_credits(hop);
1292		if (ret)
1293			return ret;
1294		if (pm_support)
1295			tb_init_pm_support(hop);
1296	}
1297
1298	return 0;
1299}
1300
1301static void tb_dp_dump(struct tb_tunnel *tunnel)
1302{
1303	struct tb_port *in, *out;
1304	u32 dp_cap, rate, lanes;
1305
1306	in = tunnel->src_port;
1307	out = tunnel->dst_port;
1308
1309	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1310			 in->cap_adap + DP_LOCAL_CAP, 1))
1311		return;
1312
1313	rate = tb_dp_cap_get_rate(dp_cap);
1314	lanes = tb_dp_cap_get_lanes(dp_cap);
1315
1316	tb_tunnel_dbg(tunnel,
1317		      "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1318		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1319
1320	if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
1321			 out->cap_adap + DP_LOCAL_CAP, 1))
1322		return;
1323
1324	rate = tb_dp_cap_get_rate(dp_cap);
1325	lanes = tb_dp_cap_get_lanes(dp_cap);
1326
1327	tb_tunnel_dbg(tunnel,
1328		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
1329		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1330
1331	if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
1332			 in->cap_adap + DP_REMOTE_CAP, 1))
1333		return;
1334
1335	rate = tb_dp_cap_get_rate(dp_cap);
1336	lanes = tb_dp_cap_get_lanes(dp_cap);
1337
1338	tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
1339		      rate, lanes, tb_dp_bandwidth(rate, lanes));
1340}
1341
1342/**
1343 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1344 * @tb: Pointer to the domain structure
1345 * @in: DP in adapter
1346 * @alloc_hopid: Allocate HopIDs from visited ports
1347 *
1348 * If @in adapter is active, follows the tunnel to the DP out adapter
1349 * and back. Returns the discovered tunnel or %NULL if there was no
1350 * tunnel.
1351 *
1352 * Return: DP tunnel or %NULL if no tunnel found.
1353 */
1354struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
1355					bool alloc_hopid)
1356{
1357	struct tb_tunnel *tunnel;
1358	struct tb_port *port;
1359	struct tb_path *path;
1360
1361	if (!tb_dp_port_is_enabled(in))
1362		return NULL;
1363
1364	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1365	if (!tunnel)
1366		return NULL;
1367
1368	tunnel->init = tb_dp_init;
1369	tunnel->deinit = tb_dp_deinit;
1370	tunnel->activate = tb_dp_activate;
1371	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1372	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1373	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1374	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1375	tunnel->src_port = in;
1376
1377	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
1378				&tunnel->dst_port, "Video", alloc_hopid);
1379	if (!path) {
1380		/* Just disable the DP IN port */
1381		tb_dp_port_enable(in, false);
1382		goto err_free;
1383	}
1384	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
1385	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
1386		goto err_free;
1387
1388	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
1389				alloc_hopid);
1390	if (!path)
1391		goto err_deactivate;
1392	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
1393	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
1394
1395	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
1396				&port, "AUX RX", alloc_hopid);
1397	if (!path)
1398		goto err_deactivate;
1399	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
1400	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
1401
1402	/* Validate that the tunnel is complete */
1403	if (!tb_port_is_dpout(tunnel->dst_port)) {
1404		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
1405		goto err_deactivate;
1406	}
1407
1408	if (!tb_dp_port_is_enabled(tunnel->dst_port))
1409		goto err_deactivate;
1410
1411	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
1412		goto err_deactivate;
1413
1414	if (port != tunnel->src_port) {
1415		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1416		goto err_deactivate;
1417	}
1418
1419	tb_dp_dump(tunnel);
1420
1421	tb_tunnel_dbg(tunnel, "discovered\n");
1422	return tunnel;
1423
1424err_deactivate:
1425	tb_tunnel_deactivate(tunnel);
1426err_free:
1427	tb_tunnel_free(tunnel);
1428
1429	return NULL;
1430}
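
/*
 * Illustrative sketch (not part of this file): a connection manager
 * scanning the domain at boot could pick up an existing DP tunnel
 * roughly like this, where "tunnel_list" is a hypothetical list head
 * owned by the caller:
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_discover_dp(tb, in, true);
 *	if (tunnel)
 *		list_add_tail(&tunnel->list, &tunnel_list);
 */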
1431
1432/**
1433 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1434 * @tb: Pointer to the domain structure
1435 * @in: DP in adapter port
1436 * @out: DP out adapter port
1437 * @link_nr: Preferred lane adapter when the link is not bonded
1438 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
1439 *	    if not limited)
1440 * @max_down: Maximum available downstream bandwidth for the DP tunnel
1441 *	      (%0 if not limited)
1442 *
1443 * Allocates a tunnel between @in and @out that is capable of tunneling
1444 * Display Port traffic.
1445 *
1446 * Return: A tb_tunnel on success or %NULL on failure.
1447 */
1448struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
1449				     struct tb_port *out, int link_nr,
1450				     int max_up, int max_down)
1451{
1452	struct tb_tunnel *tunnel;
1453	struct tb_path **paths;
1454	struct tb_path *path;
1455	bool pm_support;
1456
1457	if (WARN_ON(!in->cap_adap || !out->cap_adap))
1458		return NULL;
1459
1460	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
1461	if (!tunnel)
1462		return NULL;
1463
1464	tunnel->init = tb_dp_init;
1465	tunnel->deinit = tb_dp_deinit;
1466	tunnel->activate = tb_dp_activate;
1467	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
1468	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
1469	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
1470	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
1471	tunnel->src_port = in;
1472	tunnel->dst_port = out;
1473	tunnel->max_up = max_up;
1474	tunnel->max_down = max_down;
1475
1476	paths = tunnel->paths;
1477	pm_support = usb4_switch_version(in->sw) >= 2;
1478
1479	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
1480			     link_nr, "Video");
1481	if (!path)
1482		goto err_free;
1483	tb_dp_init_video_path(path, pm_support);
1484	paths[TB_DP_VIDEO_PATH_OUT] = path;
1485
1486	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
1487			     TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
1488	if (!path)
1489		goto err_free;
1490	tb_dp_init_aux_path(path, pm_support);
1491	paths[TB_DP_AUX_PATH_OUT] = path;
1492
1493	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
1494			     TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
1495	if (!path)
1496		goto err_free;
1497	tb_dp_init_aux_path(path, pm_support);
1498	paths[TB_DP_AUX_PATH_IN] = path;
1499
1500	return tunnel;
1501
1502err_free:
1503	tb_tunnel_free(tunnel);
1504	return NULL;
1505}
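
/*
 * Illustrative sketch (not part of this file): pairing the allocation
 * above with activation, the way a connection manager might once a DP
 * IN/OUT pair has been picked ("in", "out" and the bandwidth limits
 * are assumed to come from the caller):
 *
 *	tunnel = tb_tunnel_alloc_dp(tb, in, out, 0, available_up,
 *				    available_down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 */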
1506
1507static unsigned int tb_dma_available_credits(const struct tb_port *port)
1508{
1509	const struct tb_switch *sw = port->sw;
1510	int credits;
1511
1512	credits = tb_available_credits(port, NULL);
1513	if (tb_acpi_may_tunnel_pcie())
1514		credits -= sw->max_pcie_credits;
1515	credits -= port->dma_credits;
1516
1517	return credits > 0 ? credits : 0;
1518}
1519
1520static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
1521{
1522	struct tb_port *port = hop->in_port;
1523
1524	if (tb_port_use_credit_allocation(port)) {
1525		unsigned int available = tb_dma_available_credits(port);
1526
1527		/*
1528		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
1529		 * DMA path cannot be established.
1530		 */
1531		if (available < TB_MIN_DMA_CREDITS)
1532			return -ENOSPC;
1533
1534		/* Clamp to what is actually available */
1535		credits = min(credits, available);
1536
1537		tb_port_dbg(port, "reserving %u credits for DMA path\n",
1538			    credits);
1539
1540		port->dma_credits += credits;
1541	} else {
1542		if (tb_port_is_null(port))
1543			credits = port->bonded ? 14 : 6;
1544		else
1545			credits = min(port->total_credits, credits);
1546	}
1547
1548	hop->initial_credits = credits;
1549	return 0;
1550}
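
/*
 * Worked example for the reservation above: with the default request
 * of TB_DMA_CREDITS (14) but only 5 usable credits left on the lane
 * adapter, 5 credits get reserved; had fewer than TB_MIN_DMA_CREDITS
 * (1) been left, the path would fail with -ENOSPC instead. Routers
 * without credit allocation fall back to the fixed defaults in the
 * else branch (14/6 buffers for bonded/unbonded lane adapters).
 */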
1551
1552/* Path from lane adapter to NHI */
1553static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
1554{
1555	struct tb_path_hop *hop;
1556	unsigned int i, tmp;
1557
1558	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1559	path->ingress_fc_enable = TB_PATH_ALL;
1560	path->egress_shared_buffer = TB_PATH_NONE;
1561	path->ingress_shared_buffer = TB_PATH_NONE;
1562	path->priority = TB_DMA_PRIORITY;
1563	path->weight = TB_DMA_WEIGHT;
1564	path->clear_fc = true;
1565
1566	/*
1567	 * First lane adapter is the one connected to the remote host.
1568	 * We don't tunnel other traffic over this link so we can use
1569	 * all the credits (except the ones reserved for control traffic).
1570	 */
1571	hop = &path->hops[0];
1572	tmp = min(tb_usable_credits(hop->in_port), credits);
1573	hop->initial_credits = tmp;
1574	hop->in_port->dma_credits += tmp;
1575
1576	for (i = 1; i < path->path_length; i++) {
1577		int ret;
1578
1579		ret = tb_dma_reserve_credits(&path->hops[i], credits);
1580		if (ret)
1581			return ret;
1582	}
1583
1584	return 0;
1585}
1586
1587/* Path from NHI to lane adapter */
1588static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
1589{
1590	struct tb_path_hop *hop;
1591
1592	path->egress_fc_enable = TB_PATH_ALL;
1593	path->ingress_fc_enable = TB_PATH_ALL;
1594	path->egress_shared_buffer = TB_PATH_NONE;
1595	path->ingress_shared_buffer = TB_PATH_NONE;
1596	path->priority = TB_DMA_PRIORITY;
1597	path->weight = TB_DMA_WEIGHT;
1598	path->clear_fc = true;
1599
1600	tb_path_for_each_hop(path, hop) {
1601		int ret;
1602
1603		ret = tb_dma_reserve_credits(hop, credits);
1604		if (ret)
1605			return ret;
1606	}
1607
1608	return 0;
1609}
1610
1611static void tb_dma_release_credits(struct tb_path_hop *hop)
1612{
1613	struct tb_port *port = hop->in_port;
1614
1615	if (tb_port_use_credit_allocation(port)) {
1616		port->dma_credits -= hop->initial_credits;
1617
1618		tb_port_dbg(port, "released %u DMA path credits\n",
1619			    hop->initial_credits);
1620	}
1621}
1622
1623static void tb_dma_deinit_path(struct tb_path *path)
1624{
1625	struct tb_path_hop *hop;
1626
1627	tb_path_for_each_hop(path, hop)
1628		tb_dma_release_credits(hop);
1629}
1630
1631static void tb_dma_deinit(struct tb_tunnel *tunnel)
1632{
1633	int i;
1634
1635	for (i = 0; i < tunnel->npaths; i++) {
1636		if (!tunnel->paths[i])
1637			continue;
1638		tb_dma_deinit_path(tunnel->paths[i]);
1639	}
1640}
1641
1642/**
1643 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1644 * @tb: Pointer to the domain structure
1645 * @nhi: Host controller port
1646 * @dst: Destination null port which the other domain is connected to
1647 * @transmit_path: HopID used for transmitting packets
1648 * @transmit_ring: NHI ring number used to send packets towards the
1649 *		   other domain. Set to %-1 if TX path is not needed.
1650 * @receive_path: HopID used for receiving packets
1651 * @receive_ring: NHI ring number used to receive packets from the
1652 *		  other domain. Set to %-1 if RX path is not needed.
1653 *
1654 * Return: A tb_tunnel on success or %NULL on failure.
1655 */
1656struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1657				      struct tb_port *dst, int transmit_path,
1658				      int transmit_ring, int receive_path,
1659				      int receive_ring)
1660{
1661	struct tb_tunnel *tunnel;
1662	size_t npaths = 0, i = 0;
1663	struct tb_path *path;
1664	int credits;
1665
1666	/* Ring 0 is reserved for control channel */
1667	if (WARN_ON(!receive_ring || !transmit_ring))
1668		return NULL;
1669
1670	if (receive_ring > 0)
1671		npaths++;
1672	if (transmit_ring > 0)
1673		npaths++;
1674
1675	if (WARN_ON(!npaths))
1676		return NULL;
1677
1678	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1679	if (!tunnel)
1680		return NULL;
1681
1682	tunnel->src_port = nhi;
1683	tunnel->dst_port = dst;
1684	tunnel->deinit = tb_dma_deinit;
1685
1686	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
1687
1688	if (receive_ring > 0) {
1689		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1690				     "DMA RX");
1691		if (!path)
1692			goto err_free;
1693		tunnel->paths[i++] = path;
1694		if (tb_dma_init_rx_path(path, credits)) {
1695			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1696			goto err_free;
1697		}
1698	}
1699
1700	if (transmit_ring > 0) {
1701		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1702				     "DMA TX");
1703		if (!path)
1704			goto err_free;
1705		tunnel->paths[i++] = path;
1706		if (tb_dma_init_tx_path(path, credits)) {
1707			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1708			goto err_free;
1709		}
1710	}
1711
1712	return tunnel;
1713
1714err_free:
1715	tb_tunnel_free(tunnel);
1716	return NULL;
1717}
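
/*
 * Illustrative sketch (not part of this file): setting up a
 * host-to-host DMA tunnel over an XDomain link. The HopID (8) and
 * ring (1) values are examples negotiated with the remote connection
 * manager, not fixed constants:
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, 8, 1, 8, 1);
 *	if (!tunnel)
 *		return -ENOMEM;
 */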
1718
1719/**
1720 * tb_tunnel_match_dma() - Match DMA tunnel
1721 * @tunnel: Tunnel to match
1722 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1723 * @transmit_ring: NHI ring number used to send packets towards the
1724 *		   other domain. Pass %-1 to ignore.
1725 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1726 * @receive_ring: NHI ring number used to receive packets from the
1727 *		  other domain. Pass %-1 to ignore.
1728 *
1729 * This function can be used to match a specific DMA tunnel, if there
1730 * are multiple DMA tunnels going through the same XDomain connection.
1731 * Returns true if there is a match and false otherwise.
1732 */
1733bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1734			 int transmit_ring, int receive_path, int receive_ring)
1735{
1736	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1737	int i;
1738
1739	if (!receive_ring || !transmit_ring)
1740		return false;
1741
1742	for (i = 0; i < tunnel->npaths; i++) {
1743		const struct tb_path *path = tunnel->paths[i];
1744
1745		if (!path)
1746			continue;
1747
1748		if (tb_port_is_nhi(path->hops[0].in_port))
1749			tx_path = path;
1750		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1751			rx_path = path;
1752	}
1753
1754	if (transmit_ring > 0 || transmit_path > 0) {
1755		if (!tx_path)
1756			return false;
1757		if (transmit_ring > 0 &&
1758		    (tx_path->hops[0].in_hop_index != transmit_ring))
1759			return false;
1760		if (transmit_path > 0 &&
1761		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1762			return false;
1763	}
1764
1765	if (receive_ring > 0 || receive_path > 0) {
1766		if (!rx_path)
1767			return false;
1768		if (receive_path > 0 &&
1769		    (rx_path->hops[0].in_hop_index != receive_path))
1770			return false;
1771		if (receive_ring > 0 &&
1772		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1773			return false;
1774	}
1775
1776	return true;
1777}
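
/*
 * Illustrative sketch (not part of this file): finding the DMA tunnel
 * that uses a given set of HopIDs/rings before tearing it down, with
 * "tunnel_list" a hypothetical caller-owned list of active tunnels:
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		if (tb_tunnel_is_dma(tunnel) &&
 *		    tb_tunnel_match_dma(tunnel, transmit_path,
 *					transmit_ring, receive_path,
 *					receive_ring))
 *			break;
 *	}
 */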
1778
1779static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1780{
1781	int ret, up_max_rate, down_max_rate;
1782
1783	ret = usb4_usb3_port_max_link_rate(up);
1784	if (ret < 0)
1785		return ret;
1786	up_max_rate = ret;
1787
1788	ret = usb4_usb3_port_max_link_rate(down);
1789	if (ret < 0)
1790		return ret;
1791	down_max_rate = ret;
1792
1793	return min(up_max_rate, down_max_rate);
1794}
1795
1796static int tb_usb3_init(struct tb_tunnel *tunnel)
1797{
1798	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1799		      tunnel->allocated_up, tunnel->allocated_down);
1800
1801	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1802						 &tunnel->allocated_up,
1803						 &tunnel->allocated_down);
1804}
1805
1806static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1807{
1808	int res;
1809
1810	res = tb_usb3_port_enable(tunnel->src_port, activate);
1811	if (res)
1812		return res;
1813
1814	if (tb_port_is_usb3_up(tunnel->dst_port))
1815		return tb_usb3_port_enable(tunnel->dst_port, activate);
1816
1817	return 0;
1818}
1819
1820static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1821		int *consumed_up, int *consumed_down)
1822{
1823	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
1824	int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
1825
1826	/*
1827	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1828	 * take that into account here.
1829	 */
1830	*consumed_up = tunnel->allocated_up *
1831		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1832	*consumed_down = tunnel->allocated_down *
1833		(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
1834
1835	if (tb_port_get_link_generation(port) >= 4) {
1836		*consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
1837		*consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
1838	}
1839
1840	return 0;
1841}
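
/*
 * Worked example for the weighting above: with TB_USB3_WEIGHT (2) and
 * TB_PCI_WEIGHT (1), an allocation of 900 Mb/s in each direction is
 * reported as 900 * (2 + 1) / 2 = 1350 Mb/s consumed when PCIe
 * tunneling is enabled. On generation 4 links (USB4 v2) the result is
 * raised to at least USB4_V2_USB3_MIN_BANDWIDTH (3000 Mb/s).
 */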
1842
1843static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1844{
1845	int ret;
1846
1847	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1848					       &tunnel->allocated_up,
1849					       &tunnel->allocated_down);
1850	if (ret)
1851		return ret;
1852
1853	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1854		      tunnel->allocated_up, tunnel->allocated_down);
1855	return 0;
1856}
1857
1858static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1859						int *available_up,
1860						int *available_down)
1861{
1862	int ret, max_rate, allocate_up, allocate_down;
1863
1864	ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
1865	if (ret < 0) {
1866		tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
1867		return;
1868	}
1869
1870	/*
1871	 * 90% of the max rate can be allocated for isochronous
1872	 * transfers.
1873	 */
1874	max_rate = ret * 90 / 100;
1875
1876	/* No need to reclaim if already at maximum */
1877	if (tunnel->allocated_up >= max_rate &&
1878	    tunnel->allocated_down >= max_rate)
1879		return;
1880
1881	/* Don't go lower than what is already allocated */
1882	allocate_up = min(max_rate, *available_up);
1883	if (allocate_up < tunnel->allocated_up)
1884		allocate_up = tunnel->allocated_up;
1885
1886	allocate_down = min(max_rate, *available_down);
1887	if (allocate_down < tunnel->allocated_down)
1888		allocate_down = tunnel->allocated_down;
1889
1890	/* If nothing changed, there is no need to do more */
1891	if (allocate_up == tunnel->allocated_up &&
1892	    allocate_down == tunnel->allocated_down)
1893		return;
1894
1895	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
1896						&allocate_down);
1897	if (ret) {
1898		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
1899		return;
1900	}
1901
1902	tunnel->allocated_up = allocate_up;
1903	*available_up -= tunnel->allocated_up;
1904
1905	tunnel->allocated_down = allocate_down;
1906	*available_down -= tunnel->allocated_down;
1907
1908	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1909		      tunnel->allocated_up, tunnel->allocated_down);
1910}
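
/*
 * Worked example for the 90% rule above: if both adapters support
 * 10 Gb/s links, tb_usb3_max_link_rate() returns 10000 and at most
 * 10000 * 90 / 100 = 9000 Mb/s is claimed back for the tunnel, and
 * never more than what *available_up/*available_down still hold.
 */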
1911
1912static void tb_usb3_init_credits(struct tb_path_hop *hop)
1913{
1914	struct tb_port *port = hop->in_port;
1915	struct tb_switch *sw = port->sw;
1916	unsigned int credits;
1917
1918	if (tb_port_use_credit_allocation(port)) {
1919		credits = sw->max_usb3_credits;
1920	} else {
1921		if (tb_port_is_null(port))
1922			credits = port->bonded ? 32 : 16;
1923		else
1924			credits = 7;
1925	}
1926
1927	hop->initial_credits = credits;
1928}
1929
1930static void tb_usb3_init_path(struct tb_path *path)
1931{
1932	struct tb_path_hop *hop;
1933
1934	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1935	path->egress_shared_buffer = TB_PATH_NONE;
1936	path->ingress_fc_enable = TB_PATH_ALL;
1937	path->ingress_shared_buffer = TB_PATH_NONE;
1938	path->priority = TB_USB3_PRIORITY;
1939	path->weight = TB_USB3_WEIGHT;
1940	path->drop_packages = 0;
1941
1942	tb_path_for_each_hop(path, hop)
1943		tb_usb3_init_credits(hop);
1944}
1945
1946/**
1947 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1948 * @tb: Pointer to the domain structure
1949 * @down: USB3 downstream adapter
1950 * @alloc_hopid: Allocate HopIDs from visited ports
1951 *
1952 * If @down adapter is active, follows the tunnel to the USB3 upstream
1953 * adapter and back. Returns the discovered tunnel or %NULL if there was
1954 * no tunnel.
1955 */
1956struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
1957					  bool alloc_hopid)
1958{
1959	struct tb_tunnel *tunnel;
1960	struct tb_path *path;
1961
1962	if (!tb_usb3_port_is_enabled(down))
1963		return NULL;
1964
1965	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1966	if (!tunnel)
1967		return NULL;
1968
1969	tunnel->activate = tb_usb3_activate;
1970	tunnel->src_port = down;
1971
1972	/*
1973	 * Discover both paths even if they are not complete. We will
1974	 * clean them up by calling tb_tunnel_deactivate() below in that
1975	 * case.
1976	 */
1977	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1978				&tunnel->dst_port, "USB3 Down", alloc_hopid);
1979	if (!path) {
1980		/* Just disable the downstream port */
1981		tb_usb3_port_enable(down, false);
1982		goto err_free;
1983	}
1984	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1985	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1986
1987	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1988				"USB3 Up", alloc_hopid);
1989	if (!path)
1990		goto err_deactivate;
1991	tunnel->paths[TB_USB3_PATH_UP] = path;
1992	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1993
1994	/* Validate that the tunnel is complete */
1995	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1996		tb_port_warn(tunnel->dst_port,
1997			     "path does not end on a USB3 adapter, cleaning up\n");
1998		goto err_deactivate;
1999	}
2000
2001	if (down != tunnel->src_port) {
2002		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
2003		goto err_deactivate;
2004	}
2005
2006	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
2007		tb_tunnel_warn(tunnel,
2008			       "tunnel is not fully activated, cleaning up\n");
2009		goto err_deactivate;
2010	}
2011
2012	if (!tb_route(down->sw)) {
2013		int ret;
2014
2015		/*
2016		 * Read the initial bandwidth allocation for the first
2017		 * hop tunnel.
2018		 */
2019		ret = usb4_usb3_port_allocated_bandwidth(down,
2020			&tunnel->allocated_up, &tunnel->allocated_down);
2021		if (ret)
2022			goto err_deactivate;
2023
2024		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
2025			      tunnel->allocated_up, tunnel->allocated_down);
2026
2027		tunnel->init = tb_usb3_init;
2028		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2029		tunnel->release_unused_bandwidth =
2030			tb_usb3_release_unused_bandwidth;
2031		tunnel->reclaim_available_bandwidth =
2032			tb_usb3_reclaim_available_bandwidth;
2033	}
2034
2035	tb_tunnel_dbg(tunnel, "discovered\n");
2036	return tunnel;
2037
2038err_deactivate:
2039	tb_tunnel_deactivate(tunnel);
2040err_free:
2041	tb_tunnel_free(tunnel);
2042
2043	return NULL;
2044}
2045
2046/**
2047 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2048 * @tb: Pointer to the domain structure
2049 * @up: USB3 upstream adapter port
2050 * @down: USB3 downstream adapter port
2051 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
2052 *	    if not limited).
2053 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
2054 *	      (%0 if not limited).
2055 *
2056 * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP
2057 * and %TB_TYPE_USB3_DOWN.
2058 *
2059 * Return: A tb_tunnel on success or %NULL on failure.
2060 */
2061struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
2062				       struct tb_port *down, int max_up,
2063				       int max_down)
2064{
2065	struct tb_tunnel *tunnel;
2066	struct tb_path *path;
2067	int max_rate = 0;
2068
2069	/*
2070	 * Check that we have enough bandwidth available for the new
2071	 * USB3 tunnel.
2072	 */
2073	if (max_up > 0 || max_down > 0) {
2074		max_rate = tb_usb3_max_link_rate(down, up);
2075		if (max_rate < 0)
2076			return NULL;
2077
2078		/* Only 90% can be allocated for USB3 isochronous transfers */
2079		max_rate = max_rate * 90 / 100;
2080		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
2081			    max_rate);
2082
2083		if (max_rate > max_up || max_rate > max_down) {
2084			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
2085			return NULL;
2086		}
2087	}
2088
2089	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
2090	if (!tunnel)
2091		return NULL;
2092
2093	tunnel->activate = tb_usb3_activate;
2094	tunnel->src_port = down;
2095	tunnel->dst_port = up;
2096	tunnel->max_up = max_up;
2097	tunnel->max_down = max_down;
2098
2099	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
2100			     "USB3 Down");
2101	if (!path) {
2102		tb_tunnel_free(tunnel);
2103		return NULL;
2104	}
2105	tb_usb3_init_path(path);
2106	tunnel->paths[TB_USB3_PATH_DOWN] = path;
2107
2108	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
2109			     "USB3 Up");
2110	if (!path) {
2111		tb_tunnel_free(tunnel);
2112		return NULL;
2113	}
2114	tb_usb3_init_path(path);
2115	tunnel->paths[TB_USB3_PATH_UP] = path;
2116
2117	if (!tb_route(down->sw)) {
2118		tunnel->allocated_up = max_rate;
2119		tunnel->allocated_down = max_rate;
2120
2121		tunnel->init = tb_usb3_init;
2122		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
2123		tunnel->release_unused_bandwidth =
2124			tb_usb3_release_unused_bandwidth;
2125		tunnel->reclaim_available_bandwidth =
2126			tb_usb3_reclaim_available_bandwidth;
2127	}
2128
2129	return tunnel;
2130}
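
/*
 * Illustrative sketch (not part of this file): passing %0 for both
 * @max_up and @max_down skips the bandwidth check, which is how a
 * caller would allocate the tunnel when the link is not bandwidth
 * constrained:
 *
 *	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 0, 0);
 */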
2131
2132/**
2133 * tb_tunnel_free() - free a tunnel
2134 * @tunnel: Tunnel to be freed
2135 *
2136 * Frees a tunnel. The tunnel does not need to be deactivated.
2137 */
2138void tb_tunnel_free(struct tb_tunnel *tunnel)
2139{
2140	int i;
2141
2142	if (!tunnel)
2143		return;
2144
2145	if (tunnel->deinit)
2146		tunnel->deinit(tunnel);
2147
2148	for (i = 0; i < tunnel->npaths; i++) {
2149		if (tunnel->paths[i])
2150			tb_path_free(tunnel->paths[i]);
2151	}
2152
2153	kfree(tunnel->paths);
2154	kfree(tunnel);
2155}
2156
2157/**
2158 * tb_tunnel_is_invalid() - Check whether an activated tunnel is still valid
2159 * @tunnel: Tunnel to check
2160 */
2161bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
2162{
2163	int i;
2164
2165	for (i = 0; i < tunnel->npaths; i++) {
2166		WARN_ON(!tunnel->paths[i]->activated);
2167		if (tb_path_is_invalid(tunnel->paths[i]))
2168			return true;
2169	}
2170
2171	return false;
2172}
2173
2174/**
2175 * tb_tunnel_restart() - activate a tunnel after a hardware reset
2176 * @tunnel: Tunnel to restart
2177 *
2178 * Return: %0 on success and negative errno in case of failure
2179 */
2180int tb_tunnel_restart(struct tb_tunnel *tunnel)
2181{
2182	int res, i;
2183
2184	tb_tunnel_dbg(tunnel, "activating\n");
2185
2186	/*
2187	 * Make sure all paths are properly disabled before enabling
2188	 * them again.
2189	 */
2190	for (i = 0; i < tunnel->npaths; i++) {
2191		if (tunnel->paths[i]->activated) {
2192			tb_path_deactivate(tunnel->paths[i]);
2193			tunnel->paths[i]->activated = false;
2194		}
2195	}
2196
2197	if (tunnel->init) {
2198		res = tunnel->init(tunnel);
2199		if (res)
2200			return res;
2201	}
2202
2203	for (i = 0; i < tunnel->npaths; i++) {
2204		res = tb_path_activate(tunnel->paths[i]);
2205		if (res)
2206			goto err;
2207	}
2208
2209	if (tunnel->activate) {
2210		res = tunnel->activate(tunnel, true);
2211		if (res)
2212			goto err;
2213	}
2214
2215	return 0;
2216
2217err:
2218	tb_tunnel_warn(tunnel, "activation failed\n");
2219	tb_tunnel_deactivate(tunnel);
2220	return res;
2221}
2222
2223/**
2224 * tb_tunnel_activate() - activate a tunnel
2225 * @tunnel: Tunnel to activate
2226 *
2227 * Return: %0 on success or a negative error code on failure.
2228 */
2229int tb_tunnel_activate(struct tb_tunnel *tunnel)
2230{
2231	int i;
2232
2233	for (i = 0; i < tunnel->npaths; i++) {
2234		if (tunnel->paths[i]->activated) {
2235			tb_tunnel_WARN(tunnel,
2236				       "trying to activate an already activated tunnel\n");
2237			return -EINVAL;
2238		}
2239	}
2240
2241	return tb_tunnel_restart(tunnel);
2242}
2243
2244/**
2245 * tb_tunnel_deactivate() - deactivate a tunnel
2246 * @tunnel: Tunnel to deactivate
2247 */
2248void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
2249{
2250	int i;
2251
2252	tb_tunnel_dbg(tunnel, "deactivating\n");
2253
2254	if (tunnel->activate)
2255		tunnel->activate(tunnel, false);
2256
2257	for (i = 0; i < tunnel->npaths; i++) {
2258		if (tunnel->paths[i] && tunnel->paths[i]->activated)
2259			tb_path_deactivate(tunnel->paths[i]);
2260	}
2261}
2262
2263/**
2264 * tb_tunnel_port_on_path() - Does the tunnel go through port
2265 * @tunnel: Tunnel to check
2266 * @port: Port to check
2267 *
2268 * Returns true if @tunnel goes through @port (direction does not matter),
2269 * false otherwise.
2270 */
2271bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
2272			    const struct tb_port *port)
2273{
2274	int i;
2275
2276	for (i = 0; i < tunnel->npaths; i++) {
2277		if (!tunnel->paths[i])
2278			continue;
2279
2280		if (tb_path_port_on_path(tunnel->paths[i], port))
2281			return true;
2282	}
2283
2284	return false;
2285}
2286
2287static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
2288{
2289	int i;
2290
2291	for (i = 0; i < tunnel->npaths; i++) {
2292		if (!tunnel->paths[i])
2293			return false;
2294		if (!tunnel->paths[i]->activated)
2295			return false;
2296	}
2297
2298	return true;
2299}
2300
2301/**
2302 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2303 * @tunnel: Tunnel to check
2304 * @max_up: Maximum upstream bandwidth in Mb/s
2305 * @max_down: Maximum downstream bandwidth in Mb/s
2306 *
2307 * Returns the maximum possible bandwidth this tunnel can use if not
2308 * limited by other bandwidth clients. If the tunnel does not support
2309 * this, returns %-EOPNOTSUPP.
2310 */
2311int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
2312				int *max_down)
2313{
2314	if (!tb_tunnel_is_active(tunnel))
2315		return -EINVAL;
2316
2317	if (tunnel->maximum_bandwidth)
2318		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
2319	return -EOPNOTSUPP;
2320}
2321
2322/**
2323 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2324 * @tunnel: Tunnel to check
2325 * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
2326 * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
2327 *		    stored here
2328 *
2329 * Returns the bandwidth allocated for the tunnel. This may be higher
2330 * than what the tunnel actually consumes.
2331 */
2332int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
2333				  int *allocated_down)
2334{
2335	if (!tb_tunnel_is_active(tunnel))
2336		return -EINVAL;
2337
2338	if (tunnel->allocated_bandwidth)
2339		return tunnel->allocated_bandwidth(tunnel, allocated_up,
2340						   allocated_down);
2341	return -EOPNOTSUPP;
2342}
2343
2344/**
2345 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2346 * @tunnel: Tunnel whose bandwidth allocation to change
2347 * @alloc_up: New upstream bandwidth in Mb/s
2348 * @alloc_down: New downstream bandwidth in Mb/s
2349 *
2350 * Tries to change the tunnel bandwidth allocation. If it succeeds,
2351 * returns %0 and updates @alloc_up and @alloc_down to what was actually
2352 * allocated (which may differ from the values passed in). Returns
2353 * negative errno in case of failure.
2354 */
2355int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
2356			      int *alloc_down)
2357{
2358	if (!tb_tunnel_is_active(tunnel))
2359		return -EINVAL;
2360
2361	if (tunnel->alloc_bandwidth)
2362		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
2363
2364	return -EOPNOTSUPP;
2365}
2366
2367/**
2368 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2369 * @tunnel: Tunnel to check
2370 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
2371 *		 Can be %NULL.
2372 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
2373 *		   Can be %NULL.
2374 *
2375 * Stores the amount of isochronous bandwidth @tunnel consumes in
2376 * @consumed_up and @consumed_down. In case of success returns %0,
2377 * negative errno otherwise.
2378 */
2379int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
2380				 int *consumed_down)
2381{
2382	int up_bw = 0, down_bw = 0;
2383
2384	if (!tb_tunnel_is_active(tunnel))
2385		goto out;
2386
2387	if (tunnel->consumed_bandwidth) {
2388		int ret;
2389
2390		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
2391		if (ret)
2392			return ret;
2393
2394		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
2395			      down_bw);
2396	}
2397
2398out:
2399	if (consumed_up)
2400		*consumed_up = up_bw;
2401	if (consumed_down)
2402		*consumed_down = down_bw;
2403
2404	return 0;
2405}
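
/*
 * Illustrative sketch (not part of this file): summing what all
 * tunnels on a (hypothetical) caller-owned list consume, e.g. when
 * working out how much bandwidth remains on a link:
 *
 *	int up, down, total_up = 0, total_down = 0;
 *
 *	list_for_each_entry(tunnel, &tunnel_list, list) {
 *		if (!tb_tunnel_consumed_bandwidth(tunnel, &up, &down)) {
 *			total_up += up;
 *			total_down += down;
 *		}
 *	}
 */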
2406
2407/**
2408 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2409 * @tunnel: Tunnel whose unused bandwidth to release
2410 *
2411 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
2412 * the moment), this function makes it release all the unused bandwidth.
2413 *
2414 * Returns %0 in case of success and negative errno otherwise.
2415 */
2416int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
2417{
2418	if (!tb_tunnel_is_active(tunnel))
2419		return 0;
2420
2421	if (tunnel->release_unused_bandwidth) {
2422		int ret;
2423
2424		ret = tunnel->release_unused_bandwidth(tunnel);
2425		if (ret)
2426			return ret;
2427	}
2428
2429	return 0;
2430}
2431
2432/**
2433 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2434 * @tunnel: Tunnel reclaiming available bandwidth
2435 * @available_up: Available upstream bandwidth (in Mb/s)
2436 * @available_down: Available downstream bandwidth (in Mb/s)
2437 *
2438 * Reclaims bandwidth from @available_up and @available_down and updates
2439 * the variables accordingly (e.g. decreases both according to what was
2440 * reclaimed by the tunnel). If nothing was reclaimed, the values are
2441 * kept as is.
2442 */
2443void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
2444					   int *available_up,
2445					   int *available_down)
2446{
2447	if (!tb_tunnel_is_active(tunnel))
2448		return;
2449
2450	if (tunnel->reclaim_available_bandwidth)
2451		tunnel->reclaim_available_bandwidth(tunnel, available_up,
2452						    available_down);
2453}
2454
2455const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
2456{
2457	return tb_tunnel_names[tunnel->type];
2458}