v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - Tunneling support
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2019, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/slab.h>
  11#include <linux/list.h>
  12
  13#include "tunnel.h"
  14#include "tb.h"
  15
  16/* PCIe adapters always use HopID 8 for both directions */
  17#define TB_PCI_HOPID			8
  18
  19#define TB_PCI_PATH_DOWN		0
  20#define TB_PCI_PATH_UP			1
  21
  22/* USB3 adapters always use HopID 8 for both directions */
  23#define TB_USB3_HOPID			8
  24
  25#define TB_USB3_PATH_DOWN		0
  26#define TB_USB3_PATH_UP			1
  27
  28/* DP adapters use HopID 8 for AUX and 9 for Video */
  29#define TB_DP_AUX_TX_HOPID		8
  30#define TB_DP_AUX_RX_HOPID		8
  31#define TB_DP_VIDEO_HOPID		9
  32
  33#define TB_DP_VIDEO_PATH_OUT		0
  34#define TB_DP_AUX_PATH_OUT		1
  35#define TB_DP_AUX_PATH_IN		2
  36
  37#define TB_DMA_PATH_OUT			0
  38#define TB_DMA_PATH_IN			1
  39
  40static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
  41
  42#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
  43	do {                                                            \
  44		struct tb_tunnel *__tunnel = (tunnel);                  \
  45		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
  46		      tb_route(__tunnel->src_port->sw),                 \
  47		      __tunnel->src_port->port,                         \
  48		      tb_route(__tunnel->dst_port->sw),                 \
  49		      __tunnel->dst_port->port,                         \
  50		      tb_tunnel_names[__tunnel->type],			\
  51		      ## arg);                                          \
  52	} while (0)
  53
  54#define tb_tunnel_WARN(tunnel, fmt, arg...) \
  55	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
  56#define tb_tunnel_warn(tunnel, fmt, arg...) \
  57	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
  58#define tb_tunnel_info(tunnel, fmt, arg...) \
  59	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
  60#define tb_tunnel_dbg(tunnel, fmt, arg...) \
  61	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
  62
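/*
 * Illustrative only (not from the driver): for a PCIe tunnel between the
 * host router (route 0, adapter 3) and a device router at route 0x1
 * (adapter 1), a call such as tb_tunnel_dbg(tunnel, "discovered\n") would
 * log roughly "0:3 <-> 1:1 (PCI): discovered", i.e. source route:port,
 * destination route:port and the tunnel type, after the usual device
 * prefix added by the underlying tb_dbg() backend.
 */
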
  63static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
  64					 enum tb_tunnel_type type)
  65{
  66	struct tb_tunnel *tunnel;
  67
  68	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
  69	if (!tunnel)
  70		return NULL;
  71
  72	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
  73	if (!tunnel->paths) {
  74		tb_tunnel_free(tunnel);
  75		return NULL;
  76	}
  77
  78	INIT_LIST_HEAD(&tunnel->list);
  79	tunnel->tb = tb;
  80	tunnel->npaths = npaths;
  81	tunnel->type = type;
  82
  83	return tunnel;
  84}
  85
  86static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
  87{
  88	int res;
  89
  90	res = tb_pci_port_enable(tunnel->src_port, activate);
  91	if (res)
  92		return res;
  93
  94	if (tb_port_is_pcie_up(tunnel->dst_port))
  95		return tb_pci_port_enable(tunnel->dst_port, activate);
  96
  97	return 0;
  98}
  99
 100static int tb_initial_credits(const struct tb_switch *sw)
 101{
 102	/* If the path is complete, sw is not NULL */
 103	if (sw) {
 104		/* More credits for faster link */
 105		switch (sw->link_speed * sw->link_width) {
 106		case 40:
 107			return 32;
 108		case 20:
 109			return 24;
 110		}
 111	}
 112
 113	return 16;
 114}
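
/*
 * Worked example (illustrative): a 20 Gb/s link with two bonded lanes
 * gives link_speed * link_width = 40 and therefore 32 initial credits;
 * a 10 Gb/s x2 or 20 Gb/s x1 link gives 20 and therefore 24 credits.
 * Anything else, including an incomplete path where sw is NULL, falls
 * back to the conservative default of 16 credits.
 */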
 115
 116static void tb_pci_init_path(struct tb_path *path)
 117{
 118	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 119	path->egress_shared_buffer = TB_PATH_NONE;
 120	path->ingress_fc_enable = TB_PATH_ALL;
 121	path->ingress_shared_buffer = TB_PATH_NONE;
 122	path->priority = 3;
 123	path->weight = 1;
 124	path->drop_packages = 0;
 125	path->nfc_credits = 0;
 126	path->hops[0].initial_credits = 7;
 127	if (path->path_length > 1)
 128		path->hops[1].initial_credits =
 129			tb_initial_credits(path->hops[1].in_port->sw);
 130}
 131
 132/**
 133 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 134 * @tb: Pointer to the domain structure
 135 * @down: PCIe downstream adapter
 136 *
 137 * If @down adapter is active, follows the tunnel to the PCIe upstream
 138 * adapter and back. Returns the discovered tunnel or %NULL if there was
 139 * no tunnel.
 140 */
 141struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
 142{
 143	struct tb_tunnel *tunnel;
 144	struct tb_path *path;
 145
 146	if (!tb_pci_port_is_enabled(down))
 147		return NULL;
 148
 149	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 150	if (!tunnel)
 151		return NULL;
 152
 153	tunnel->activate = tb_pci_activate;
 154	tunnel->src_port = down;
 155
 156	/*
 157	 * Discover both paths even if they are not complete. We will
 158	 * clean them up by calling tb_tunnel_deactivate() below in that
 159	 * case.
 160	 */
 161	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
 162				&tunnel->dst_port, "PCIe Up");
 163	if (!path) {
 164		/* Just disable the downstream port */
 165		tb_pci_port_enable(down, false);
 166		goto err_free;
 167	}
 168	tunnel->paths[TB_PCI_PATH_UP] = path;
 169	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
 170
 171	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
 172				"PCIe Down");
 173	if (!path)
 174		goto err_deactivate;
 175	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 176	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
 177
 178	/* Validate that the tunnel is complete */
 179	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
 180		tb_port_warn(tunnel->dst_port,
 181			     "path does not end on a PCIe adapter, cleaning up\n");
 182		goto err_deactivate;
 183	}
 184
 185	if (down != tunnel->src_port) {
 186		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
 187		goto err_deactivate;
 188	}
 189
 190	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
 191		tb_tunnel_warn(tunnel,
 192			       "tunnel is not fully activated, cleaning up\n");
 193		goto err_deactivate;
 194	}
 195
 196	tb_tunnel_dbg(tunnel, "discovered\n");
 197	return tunnel;
 198
 199err_deactivate:
 200	tb_tunnel_deactivate(tunnel);
 201err_free:
 202	tb_tunnel_free(tunnel);
 203
 204	return NULL;
 205}
 206
 207/**
 208 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 209 * @tb: Pointer to the domain structure
 210 * @up: PCIe upstream adapter port
 211 * @down: PCIe downstream adapter port
 212 *
 213 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 214 * TB_TYPE_PCIE_DOWN.
 215 *
 216 * Return: Returns a tb_tunnel on success or NULL on failure.
 217 */
 218struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 219				      struct tb_port *down)
 220{
 221	struct tb_tunnel *tunnel;
 222	struct tb_path *path;
 223
 224	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 225	if (!tunnel)
 226		return NULL;
 227
 228	tunnel->activate = tb_pci_activate;
 229	tunnel->src_port = down;
 230	tunnel->dst_port = up;
 231
 232	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
 233			     "PCIe Down");
 234	if (!path) {
 235		tb_tunnel_free(tunnel);
 236		return NULL;
 237	}
 238	tb_pci_init_path(path);
 239	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 240
 241	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
 242			     "PCIe Up");
 243	if (!path) {
 244		tb_tunnel_free(tunnel);
 245		return NULL;
 246	}
 247	tb_pci_init_path(path);
 248	tunnel->paths[TB_PCI_PATH_UP] = path;
 249
 250	return tunnel;
 251}
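
/*
 * Usage sketch (illustrative, not taken verbatim from the driver; error
 * handling trimmed): a connection manager that has resolved the PCIe
 * adapter pair could bring the tunnel up with something like:
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *	list_add_tail(&tunnel->list, &tunnel_list);
 *
 * where tunnel_list stands for whatever bookkeeping the caller keeps for
 * active tunnels.
 */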
 252
 253static bool tb_dp_is_usb4(const struct tb_switch *sw)
 254{
 255	/* Titan Ridge DP adapters need the same treatment as USB4 */
 256	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
 257}
 258
 259static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
 260{
 261	int timeout = 10;
 262	u32 val;
 263	int ret;
 264
 265	/* Both ends need to support this */
 266	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
 267		return 0;
 268
 269	ret = tb_port_read(out, &val, TB_CFG_PORT,
 270			   out->cap_adap + DP_STATUS_CTRL, 1);
 271	if (ret)
 272		return ret;
 273
 274	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
 275
 276	ret = tb_port_write(out, &val, TB_CFG_PORT,
 277			    out->cap_adap + DP_STATUS_CTRL, 1);
 278	if (ret)
 279		return ret;
 280
 281	do {
 282		ret = tb_port_read(out, &val, TB_CFG_PORT,
 283				   out->cap_adap + DP_STATUS_CTRL, 1);
 284		if (ret)
 285			return ret;
 286		if (!(val & DP_STATUS_CTRL_CMHS))
 287			return 0;
 288		usleep_range(10, 100);
 289	} while (timeout--);
 290
 291	return -ETIMEDOUT;
 292}
 293
 294static inline u32 tb_dp_cap_get_rate(u32 val)
 295{
 296	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
 297
 298	switch (rate) {
 299	case DP_COMMON_CAP_RATE_RBR:
 300		return 1620;
 301	case DP_COMMON_CAP_RATE_HBR:
 302		return 2700;
 303	case DP_COMMON_CAP_RATE_HBR2:
 304		return 5400;
 305	case DP_COMMON_CAP_RATE_HBR3:
 306		return 8100;
 307	default:
 308		return 0;
 309	}
 310}
 311
 312static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
 313{
 314	val &= ~DP_COMMON_CAP_RATE_MASK;
 315	switch (rate) {
 316	default:
 317		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
 318		fallthrough;
 319	case 1620:
 320		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
 321		break;
 322	case 2700:
 323		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
 324		break;
 325	case 5400:
 326		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
 327		break;
 328	case 8100:
 329		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
 330		break;
 331	}
 332	return val;
 333}
 334
 335static inline u32 tb_dp_cap_get_lanes(u32 val)
 336{
 337	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
 338
 339	switch (lanes) {
 340	case DP_COMMON_CAP_1_LANE:
 341		return 1;
 342	case DP_COMMON_CAP_2_LANES:
 343		return 2;
 344	case DP_COMMON_CAP_4_LANES:
 345		return 4;
 346	default:
 347		return 0;
 348	}
 349}
 350
 351static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
 352{
 353	val &= ~DP_COMMON_CAP_LANES_MASK;
 354	switch (lanes) {
 355	default:
 356		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
 357		     lanes);
 358		fallthrough;
 359	case 1:
 360		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
 361		break;
 362	case 2:
 363		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
 364		break;
 365	case 4:
 366		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
 367		break;
 368	}
 369	return val;
 370}
 371
 372static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
 373{
 374	/* Tunneling removes the DP 8b/10b encoding */
 375	return rate * lanes * 8 / 10;
 376}
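
/*
 * Example (illustrative): an HBR2 stream at 5400 Mb/s over two lanes
 * carries 5400 * 2 * 8 / 10 = 8640 Mb/s of payload once the 8b/10b line
 * coding overhead is stripped, matching the corresponding entry in the
 * dp_bw[] table below.
 */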
 377
 378static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
 379				  u32 out_rate, u32 out_lanes, u32 *new_rate,
 380				  u32 *new_lanes)
 381{
 382	static const u32 dp_bw[][2] = {
 383		/* Mb/s, lanes */
 384		{ 8100, 4 }, /* 25920 Mb/s */
 385		{ 5400, 4 }, /* 17280 Mb/s */
 386		{ 8100, 2 }, /* 12960 Mb/s */
 387		{ 2700, 4 }, /* 8640 Mb/s */
 388		{ 5400, 2 }, /* 8640 Mb/s */
 389		{ 8100, 1 }, /* 6480 Mb/s */
 390		{ 1620, 4 }, /* 5184 Mb/s */
 391		{ 5400, 1 }, /* 4320 Mb/s */
 392		{ 2700, 2 }, /* 4320 Mb/s */
 393		{ 1620, 2 }, /* 2592 Mb/s */
 394		{ 2700, 1 }, /* 2160 Mb/s */
 395		{ 1620, 1 }, /* 1296 Mb/s */
 396	};
 397	unsigned int i;
 398
 399	/*
 400	 * Find a combination that can fit into max_bw and does not
 401	 * exceed the maximum rate and lanes supported by the DP OUT and
 402	 * DP IN adapters.
 403	 */
 404	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
 405		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
 406			continue;
 407
 408		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
 409			continue;
 410
 411		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
 412			*new_rate = dp_bw[i][0];
 413			*new_lanes = dp_bw[i][1];
 414			return 0;
 415		}
 416	}
 417
 418	return -ENOSR;
 419}
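
/*
 * Example (illustrative): with max_bw = 9000 Mb/s, a DP IN that supports
 * 8100 Mb/s x4 and a DP OUT limited to 5400 Mb/s x2, the earlier table
 * entries are rejected because they exceed the OUT adapter's rate or
 * lane count, and { 5400, 2 } is the first entry that fits, so
 * *new_rate = 5400 and *new_lanes = 2 (8640 Mb/s).
 */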
 420
 421static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
 422{
 423	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
 424	struct tb_port *out = tunnel->dst_port;
 425	struct tb_port *in = tunnel->src_port;
 426	int ret, max_bw;
 427
 428	/*
 429	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
 430	 * newer generation hardware.
 431	 */
 432	if (in->sw->generation < 2 || out->sw->generation < 2)
 433		return 0;
 434
 435	/*
 436	 * Perform connection manager handshake between IN and OUT ports
 437	 * before capabilities exchange can take place.
 438	 */
 439	ret = tb_dp_cm_handshake(in, out);
 440	if (ret)
 441		return ret;
 442
 443	/* Read both DP_LOCAL_CAP registers */
 444	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
 445			   in->cap_adap + DP_LOCAL_CAP, 1);
 446	if (ret)
 447		return ret;
 448
 449	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
 450			   out->cap_adap + DP_LOCAL_CAP, 1);
 451	if (ret)
 452		return ret;
 453
 454	/* Write IN local caps to OUT remote caps */
 455	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
 456			    out->cap_adap + DP_REMOTE_CAP, 1);
 457	if (ret)
 458		return ret;
 459
 460	in_rate = tb_dp_cap_get_rate(in_dp_cap);
 461	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
 462	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 463		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
 464
 465	/*
 466	 * If the tunnel bandwidth is limited (max_bw is set) then see
 467	 * if we need to reduce bandwidth to fit there.
 468	 */
 469	out_rate = tb_dp_cap_get_rate(out_dp_cap);
 470	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
 471	bw = tb_dp_bandwidth(out_rate, out_lanes);
 472	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 473		    out_rate, out_lanes, bw);
 474
 475	if (in->sw->config.depth < out->sw->config.depth)
 476		max_bw = tunnel->max_down;
 477	else
 478		max_bw = tunnel->max_up;
 479
 480	if (max_bw && bw > max_bw) {
 481		u32 new_rate, new_lanes, new_bw;
 482
 483		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
 484					     out_rate, out_lanes, &new_rate,
 485					     &new_lanes);
 486		if (ret) {
 487			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
 488			return ret;
 489		}
 490
 491		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
 492		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
 493			    new_rate, new_lanes, new_bw);
 494
 495		/*
 496		 * Set new rate and number of lanes before writing it to
 497		 * the IN port remote caps.
 498		 */
 499		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
 500		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
 501	}
 502
 503	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
 504			     in->cap_adap + DP_REMOTE_CAP, 1);
 505}
 506
 507static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 508{
 509	int ret;
 510
 511	if (active) {
 512		struct tb_path **paths;
 513		int last;
 514
 515		paths = tunnel->paths;
 516		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
 517
 518		tb_dp_port_set_hops(tunnel->src_port,
 519			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
 520			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
 521			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
 522
 523		tb_dp_port_set_hops(tunnel->dst_port,
 524			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
 525			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
 526			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
 527	} else {
 528		tb_dp_port_hpd_clear(tunnel->src_port);
 529		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
 530		if (tb_port_is_dpout(tunnel->dst_port))
 531			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
 532	}
 533
 534	ret = tb_dp_port_enable(tunnel->src_port, active);
 535	if (ret)
 536		return ret;
 537
 538	if (tb_port_is_dpout(tunnel->dst_port))
 539		return tb_dp_port_enable(tunnel->dst_port, active);
 540
 541	return 0;
 542}
 543
 544static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 545				    int *consumed_down)
 546{
 547	struct tb_port *in = tunnel->src_port;
 548	const struct tb_switch *sw = in->sw;
 549	u32 val, rate = 0, lanes = 0;
 550	int ret;
 551
 552	if (tb_dp_is_usb4(sw)) {
 553		int timeout = 20;
 554
 555		/*
 556		 * Wait for DPRX done. Normally it should be already set
 557		 * for active tunnel.
 558		 */
 559		do {
 560			ret = tb_port_read(in, &val, TB_CFG_PORT,
 561					   in->cap_adap + DP_COMMON_CAP, 1);
 562			if (ret)
 563				return ret;
 564
 565			if (val & DP_COMMON_CAP_DPRX_DONE) {
 566				rate = tb_dp_cap_get_rate(val);
 567				lanes = tb_dp_cap_get_lanes(val);
 568				break;
 569			}
 570			msleep(250);
 571		} while (timeout--);
 572
 573		if (!timeout)
 574			return -ETIMEDOUT;
 575	} else if (sw->generation >= 2) {
 576		/*
 577		 * Read from the copied remote cap so that we take into
 578		 * account if capabilities were reduced during exchange.
 579		 */
 580		ret = tb_port_read(in, &val, TB_CFG_PORT,
 581				   in->cap_adap + DP_REMOTE_CAP, 1);
 582		if (ret)
 583			return ret;
 584
 585		rate = tb_dp_cap_get_rate(val);
 586		lanes = tb_dp_cap_get_lanes(val);
 587	} else {
 588		/* No bandwidth management for legacy devices  */
 589		*consumed_up = 0;
 590		*consumed_down = 0;
 591		return 0;
 592	}
 593
 594	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
 595		*consumed_up = 0;
 596		*consumed_down = tb_dp_bandwidth(rate, lanes);
 597	} else {
 598		*consumed_up = tb_dp_bandwidth(rate, lanes);
 599		*consumed_down = 0;
 600	}
 601
 602	return 0;
 603}
 604
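/*
 * Example (illustrative): if the DP IN adapter sits on the host router
 * (depth 0) and the DP OUT adapter on a device router (depth 1), a
 * negotiated 5400 Mb/s x4 stream is reported as 17280 Mb/s consumed
 * downstream and 0 Mb/s upstream; the directions are swapped when the
 * DP IN adapter is deeper in the topology than the DP OUT adapter.
 */
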
 605static void tb_dp_init_aux_path(struct tb_path *path)
 606{
 607	int i;
 608
 609	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 610	path->egress_shared_buffer = TB_PATH_NONE;
 611	path->ingress_fc_enable = TB_PATH_ALL;
 612	path->ingress_shared_buffer = TB_PATH_NONE;
 613	path->priority = 2;
 614	path->weight = 1;
 615
 616	for (i = 0; i < path->path_length; i++)
 617		path->hops[i].initial_credits = 1;
 618}
 619
 620static void tb_dp_init_video_path(struct tb_path *path, bool discover)
 621{
 622	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;
 623
 624	path->egress_fc_enable = TB_PATH_NONE;
 625	path->egress_shared_buffer = TB_PATH_NONE;
 626	path->ingress_fc_enable = TB_PATH_NONE;
 627	path->ingress_shared_buffer = TB_PATH_NONE;
 628	path->priority = 1;
 629	path->weight = 1;
 630
 631	if (discover) {
 632		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
 633	} else {
 634		u32 max_credits;
 635
 636		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
 637			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 638		/* Leave some credits for AUX path */
 639		path->nfc_credits = min(max_credits - 2, 12U);
 640	}
 641}
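
/*
 * Example (illustrative): if the ingress adapter advertises 20 total
 * buffers in ADP_CS_4, a newly allocated video path gets
 * min(20 - 2, 12) = 12 non-flow-controlled credits, keeping a couple of
 * buffers free for the AUX paths; on discovery the NFC buffer count
 * already programmed into the adapter is preserved instead.
 */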
 642
 643/**
 644 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 645 * @tb: Pointer to the domain structure
 646 * @in: DP in adapter
 647 *
 648 * If @in adapter is active, follows the tunnel to the DP out adapter
 649 * and back. Returns the discovered tunnel or %NULL if there was no
 650 * tunnel.
 651 *
 652 * Return: DP tunnel or %NULL if no tunnel found.
 653 */
 654struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
 655{
 656	struct tb_tunnel *tunnel;
 657	struct tb_port *port;
 658	struct tb_path *path;
 659
 660	if (!tb_dp_port_is_enabled(in))
 661		return NULL;
 662
 663	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
 664	if (!tunnel)
 665		return NULL;
 666
 667	tunnel->init = tb_dp_xchg_caps;
 668	tunnel->activate = tb_dp_activate;
 669	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
 670	tunnel->src_port = in;
 671
 672	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
 673				&tunnel->dst_port, "Video");
 674	if (!path) {
 675		/* Just disable the DP IN port */
 676		tb_dp_port_enable(in, false);
 677		goto err_free;
 678	}
 679	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
 680	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
 681
 682	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
 683	if (!path)
 684		goto err_deactivate;
 685	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
 686	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
 687
 688	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
 689				&port, "AUX RX");
 690	if (!path)
 691		goto err_deactivate;
 692	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
 693	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
 694
 695	/* Validate that the tunnel is complete */
 696	if (!tb_port_is_dpout(tunnel->dst_port)) {
 697		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
 698		goto err_deactivate;
 699	}
 700
 701	if (!tb_dp_port_is_enabled(tunnel->dst_port))
 702		goto err_deactivate;
 703
 704	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
 705		goto err_deactivate;
 706
 707	if (port != tunnel->src_port) {
 708		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
 709		goto err_deactivate;
 710	}
 711
 712	tb_tunnel_dbg(tunnel, "discovered\n");
 713	return tunnel;
 714
 715err_deactivate:
 716	tb_tunnel_deactivate(tunnel);
 717err_free:
 718	tb_tunnel_free(tunnel);
 719
 720	return NULL;
 721}
 722
 723/**
 724 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 725 * @tb: Pointer to the domain structure
 726 * @in: DP in adapter port
 727 * @out: DP out adapter port
 728 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 729 *	    if not limited)
 730 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 731 *	      (%0 if not limited)
 732 *
 733 * Allocates a tunnel between @in and @out that is capable of tunneling
 734 * Display Port traffic.
 735 *
 736 * Return: Returns a tb_tunnel on success or NULL on failure.
 737 */
 738struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 739				     struct tb_port *out, int max_up,
 740				     int max_down)
 741{
 742	struct tb_tunnel *tunnel;
 743	struct tb_path **paths;
 744	struct tb_path *path;
 745
 746	if (WARN_ON(!in->cap_adap || !out->cap_adap))
 747		return NULL;
 748
 749	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
 750	if (!tunnel)
 751		return NULL;
 752
 753	tunnel->init = tb_dp_xchg_caps;
 754	tunnel->activate = tb_dp_activate;
 755	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
 756	tunnel->src_port = in;
 757	tunnel->dst_port = out;
 758	tunnel->max_up = max_up;
 759	tunnel->max_down = max_down;
 760
 761	paths = tunnel->paths;
 762
 763	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
 764			     1, "Video");
 765	if (!path)
 766		goto err_free;
 767	tb_dp_init_video_path(path, false);
 768	paths[TB_DP_VIDEO_PATH_OUT] = path;
 769
 770	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
 771			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
 772	if (!path)
 773		goto err_free;
 774	tb_dp_init_aux_path(path);
 775	paths[TB_DP_AUX_PATH_OUT] = path;
 776
 777	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
 778			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
 779	if (!path)
 780		goto err_free;
 781	tb_dp_init_aux_path(path);
 782	paths[TB_DP_AUX_PATH_IN] = path;
 783
 784	return tunnel;
 785
 786err_free:
 787	tb_tunnel_free(tunnel);
 788	return NULL;
 789}
 790
 791static u32 tb_dma_credits(struct tb_port *nhi)
 792{
 793	u32 max_credits;
 794
 795	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
 796		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 797	return min(max_credits, 13U);
 798}
 799
 800static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
 801{
 802	struct tb_port *nhi = tunnel->src_port;
 803	u32 credits;
 804
 805	credits = active ? tb_dma_credits(nhi) : 0;
 806	return tb_port_set_initial_credits(nhi, credits);
 807}
 808
 809static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
 810			     unsigned int efc, u32 credits)
 811{
 812	int i;
 813
 814	path->egress_fc_enable = efc;
 815	path->ingress_fc_enable = TB_PATH_ALL;
 816	path->egress_shared_buffer = TB_PATH_NONE;
 817	path->ingress_shared_buffer = isb;
 818	path->priority = 5;
 819	path->weight = 1;
 820	path->clear_fc = true;
 821
 822	for (i = 0; i < path->path_length; i++)
 823		path->hops[i].initial_credits = credits;
 824}
 825
 826/**
 827 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 828 * @tb: Pointer to the domain structure
 829 * @nhi: Host controller port
 830 * @dst: Destination null port which the other domain is connected to
 831 * @transmit_ring: NHI ring number used to send packets towards the
 832 *		   other domain
 833 * @transmit_path: HopID used for transmitting packets
 834 * @receive_ring: NHI ring number used to receive packets from the
 835 *		  other domain
 836 * @receive_path: HopID used for receiving packets
 837 *
 838 * Return: Returns a tb_tunnel on success or NULL on failure.
 839 */
 840struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 841				      struct tb_port *dst, int transmit_ring,
 842				      int transmit_path, int receive_ring,
 843				      int receive_path)
 844{
 845	struct tb_tunnel *tunnel;
 846	struct tb_path *path;
 847	u32 credits;
 848
 849	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
 850	if (!tunnel)
 851		return NULL;
 852
 853	tunnel->activate = tb_dma_activate;
 854	tunnel->src_port = nhi;
 855	tunnel->dst_port = dst;
 856
 857	credits = tb_dma_credits(nhi);
 858
 859	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
 860	if (!path) {
 861		tb_tunnel_free(tunnel);
 862		return NULL;
 863	}
 864	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
 865			 credits);
 866	tunnel->paths[TB_DMA_PATH_IN] = path;
 867
 868	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
 869	if (!path) {
 870		tb_tunnel_free(tunnel);
 871		return NULL;
 872	}
 873	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
 874	tunnel->paths[TB_DMA_PATH_OUT] = path;
 875
 876	return tunnel;
 877}
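
/*
 * Usage sketch (illustrative, error handling trimmed): once the two
 * hosts have agreed on ring numbers and HopIDs over the XDomain
 * protocol, the local side could set up its half of the link with:
 *
 *	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, xd_port,
 *				     transmit_ring, transmit_path,
 *				     receive_ring, receive_path);
 *	if (tunnel)
 *		ret = tb_tunnel_activate(tunnel);
 *
 * nhi_port and xd_port here stand for the host's NHI adapter and the
 * null port facing the other domain.
 */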
 878
 879static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
 880{
 881	int ret, up_max_rate, down_max_rate;
 882
 883	ret = usb4_usb3_port_max_link_rate(up);
 884	if (ret < 0)
 885		return ret;
 886	up_max_rate = ret;
 887
 888	ret = usb4_usb3_port_max_link_rate(down);
 889	if (ret < 0)
 890		return ret;
 891	down_max_rate = ret;
 892
 893	return min(up_max_rate, down_max_rate);
 894}
 895
 896static int tb_usb3_init(struct tb_tunnel *tunnel)
 897{
 898	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
 899		      tunnel->allocated_up, tunnel->allocated_down);
 900
 901	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
 902						 &tunnel->allocated_up,
 903						 &tunnel->allocated_down);
 904}
 905
 906static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
 907{
 908	int res;
 909
 910	res = tb_usb3_port_enable(tunnel->src_port, activate);
 911	if (res)
 912		return res;
 913
 914	if (tb_port_is_usb3_up(tunnel->dst_port))
 915		return tb_usb3_port_enable(tunnel->dst_port, activate);
 916
 917	return 0;
 918}
 919
 920static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
 921		int *consumed_up, int *consumed_down)
 922{
 923	/*
 924	 * PCIe tunneling affects the USB3 bandwidth so take that
 925	 * into account here.
 926	 */
 927	*consumed_up = tunnel->allocated_up * (3 + 1) / 3;
 928	*consumed_down = tunnel->allocated_down * (3 + 1) / 3;
 929	return 0;
 930}
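
/*
 * Example (illustrative): with 900 Mb/s allocated in each direction the
 * reported consumption is 900 * 4 / 3 = 1200 Mb/s per direction, i.e.
 * the allocation is padded by a third to leave room for PCIe traffic
 * sharing the same link.
 */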
 931
 932static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
 933{
 934	int ret;
 935
 936	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
 937					       &tunnel->allocated_up,
 938					       &tunnel->allocated_down);
 939	if (ret)
 940		return ret;
 941
 942	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
 943		      tunnel->allocated_up, tunnel->allocated_down);
 944	return 0;
 945}
 946
 947static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
 948						int *available_up,
 949						int *available_down)
 950{
 951	int ret, max_rate, allocate_up, allocate_down;
 952
 953	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
 954	if (ret < 0) {
 955		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
 956		return;
 957	} else if (!ret) {
 958		/* Use maximum link rate if the link valid is not set */
 959		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
 960		if (ret < 0) {
 961			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
 962			return;
 963		}
 964	}
 965
 966	/*
 967	 * 90% of the max rate can be allocated for isochronous
 968	 * transfers.
 969	 */
 970	max_rate = ret * 90 / 100;
 971
 972	/* No need to reclaim if already at maximum */
 973	if (tunnel->allocated_up >= max_rate &&
 974	    tunnel->allocated_down >= max_rate)
 975		return;
 976
 977	/* Don't go lower than what is already allocated */
 978	allocate_up = min(max_rate, *available_up);
 979	if (allocate_up < tunnel->allocated_up)
 980		allocate_up = tunnel->allocated_up;
 981
 982	allocate_down = min(max_rate, *available_down);
 983	if (allocate_down < tunnel->allocated_down)
 984		allocate_down = tunnel->allocated_down;
 985
 986	/* If no changes no need to do more */
 987	if (allocate_up == tunnel->allocated_up &&
 988	    allocate_down == tunnel->allocated_down)
 989		return;
 990
 991	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
 992						&allocate_down);
 993	if (ret) {
 994		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
 995		return;
 996	}
 997
 998	tunnel->allocated_up = allocate_up;
 999	*available_up -= tunnel->allocated_up;
1000
1001	tunnel->allocated_down = allocate_down;
1002	*available_down -= tunnel->allocated_down;
1003
1004	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1005		      tunnel->allocated_up, tunnel->allocated_down);
1006}
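
/*
 * Example (illustrative): on a 10 Gb/s USB3 link max_rate is
 * 10000 * 90 / 100 = 9000 Mb/s. If the tunnel currently has 900 Mb/s
 * allocated per direction and 3000 Mb/s is still available both ways,
 * it asks for min(9000, 3000) = 3000 Mb/s in each direction; assuming
 * the request is granted in full, *available_up and *available_down
 * drop to 0 and the allocation grows to 3000/3000 Mb/s.
 */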
1007
1008static void tb_usb3_init_path(struct tb_path *path)
1009{
1010	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1011	path->egress_shared_buffer = TB_PATH_NONE;
1012	path->ingress_fc_enable = TB_PATH_ALL;
1013	path->ingress_shared_buffer = TB_PATH_NONE;
1014	path->priority = 3;
1015	path->weight = 3;
1016	path->drop_packages = 0;
1017	path->nfc_credits = 0;
1018	path->hops[0].initial_credits = 7;
1019	if (path->path_length > 1)
1020		path->hops[1].initial_credits =
1021			tb_initial_credits(path->hops[1].in_port->sw);
1022}
1023
1024/**
1025 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1026 * @tb: Pointer to the domain structure
1027 * @down: USB3 downstream adapter
1028 *
1029 * If @down adapter is active, follows the tunnel to the USB3 upstream
1030 * adapter and back. Returns the discovered tunnel or %NULL if there was
1031 * no tunnel.
1032 */
1033struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
1034{
1035	struct tb_tunnel *tunnel;
1036	struct tb_path *path;
1037
1038	if (!tb_usb3_port_is_enabled(down))
1039		return NULL;
1040
1041	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1042	if (!tunnel)
1043		return NULL;
1044
1045	tunnel->activate = tb_usb3_activate;
1046	tunnel->src_port = down;
1047
1048	/*
1049	 * Discover both paths even if they are not complete. We will
1050	 * clean them up by calling tb_tunnel_deactivate() below in that
1051	 * case.
1052	 */
1053	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1054				&tunnel->dst_port, "USB3 Down");
1055	if (!path) {
1056		/* Just disable the downstream port */
1057		tb_usb3_port_enable(down, false);
1058		goto err_free;
1059	}
1060	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1061	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1062
1063	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1064				"USB3 Up");
1065	if (!path)
1066		goto err_deactivate;
1067	tunnel->paths[TB_USB3_PATH_UP] = path;
1068	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1069
1070	/* Validate that the tunnel is complete */
1071	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1072		tb_port_warn(tunnel->dst_port,
1073			     "path does not end on a USB3 adapter, cleaning up\n");
1074		goto err_deactivate;
1075	}
1076
1077	if (down != tunnel->src_port) {
1078		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1079		goto err_deactivate;
1080	}
1081
1082	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
1083		tb_tunnel_warn(tunnel,
1084			       "tunnel is not fully activated, cleaning up\n");
1085		goto err_deactivate;
1086	}
1087
1088	if (!tb_route(down->sw)) {
1089		int ret;
1090
1091		/*
1092		 * Read the initial bandwidth allocation for the first
1093		 * hop tunnel.
1094		 */
1095		ret = usb4_usb3_port_allocated_bandwidth(down,
1096			&tunnel->allocated_up, &tunnel->allocated_down);
1097		if (ret)
1098			goto err_deactivate;
1099
1100		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
1101			      tunnel->allocated_up, tunnel->allocated_down);
1102
1103		tunnel->init = tb_usb3_init;
1104		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1105		tunnel->release_unused_bandwidth =
1106			tb_usb3_release_unused_bandwidth;
1107		tunnel->reclaim_available_bandwidth =
1108			tb_usb3_reclaim_available_bandwidth;
1109	}
1110
1111	tb_tunnel_dbg(tunnel, "discovered\n");
1112	return tunnel;
1113
1114err_deactivate:
1115	tb_tunnel_deactivate(tunnel);
1116err_free:
1117	tb_tunnel_free(tunnel);
1118
1119	return NULL;
1120}
1121
1122/**
1123 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
1124 * @tb: Pointer to the domain structure
1125 * @up: USB3 upstream adapter port
1126 * @down: USB3 downstream adapter port
1127 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
1128 *	    if not limited).
1129 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
1130 *	      (%0 if not limited).
1131 *
1132 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
1133 * @TB_TYPE_USB3_DOWN.
1134 *
1135 * Return: Returns a tb_tunnel on success or %NULL on failure.
1136 */
1137struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
1138				       struct tb_port *down, int max_up,
1139				       int max_down)
1140{
1141	struct tb_tunnel *tunnel;
1142	struct tb_path *path;
1143	int max_rate = 0;
1144
1145	/*
1146	 * Check that we have enough bandwidth available for the new
1147	 * USB3 tunnel.
1148	 */
1149	if (max_up > 0 || max_down > 0) {
1150		max_rate = tb_usb3_max_link_rate(down, up);
1151		if (max_rate < 0)
1152			return NULL;
1153
1154		/* Only 90% can be allocated for USB3 isochronous transfers */
1155		max_rate = max_rate * 90 / 100;
1156		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
1157			    max_rate);
1158
1159		if (max_rate > max_up || max_rate > max_down) {
1160			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
1161			return NULL;
1162		}
1163	}
1164
1165	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1166	if (!tunnel)
1167		return NULL;
1168
1169	tunnel->activate = tb_usb3_activate;
1170	tunnel->src_port = down;
1171	tunnel->dst_port = up;
1172	tunnel->max_up = max_up;
1173	tunnel->max_down = max_down;
1174
1175	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
1176			     "USB3 Down");
1177	if (!path) {
1178		tb_tunnel_free(tunnel);
1179		return NULL;
1180	}
1181	tb_usb3_init_path(path);
1182	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1183
1184	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
1185			     "USB3 Up");
1186	if (!path) {
1187		tb_tunnel_free(tunnel);
1188		return NULL;
1189	}
1190	tb_usb3_init_path(path);
1191	tunnel->paths[TB_USB3_PATH_UP] = path;
1192
1193	if (!tb_route(down->sw)) {
1194		tunnel->allocated_up = max_rate;
1195		tunnel->allocated_down = max_rate;
1196
1197		tunnel->init = tb_usb3_init;
1198		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1199		tunnel->release_unused_bandwidth =
1200			tb_usb3_release_unused_bandwidth;
1201		tunnel->reclaim_available_bandwidth =
1202			tb_usb3_reclaim_available_bandwidth;
1203	}
1204
1205	return tunnel;
1206}
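
/*
 * Example (illustrative): for a bandwidth-limited first-hop tunnel over
 * a 10 Gb/s USB3 link, max_rate is 10000 * 90 / 100 = 9000 Mb/s. If the
 * caller can offer only max_up = max_down = 8000 Mb/s the allocation is
 * refused up front; with 20000 Mb/s on both sides it succeeds and the
 * tunnel starts out with 9000 Mb/s allocated in each direction.
 */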
1207
1208/**
1209 * tb_tunnel_free() - free a tunnel
1210 * @tunnel: Tunnel to be freed
1211 *
1212 * Frees a tunnel. The tunnel does not need to be deactivated.
1213 */
1214void tb_tunnel_free(struct tb_tunnel *tunnel)
1215{
1216	int i;
1217
1218	if (!tunnel)
1219		return;
1220
1221	for (i = 0; i < tunnel->npaths; i++) {
1222		if (tunnel->paths[i])
1223			tb_path_free(tunnel->paths[i]);
1224	}
1225
1226	kfree(tunnel->paths);
1227	kfree(tunnel);
1228}
1229
1230/**
1231 * tb_tunnel_is_invalid() - check whether an activated path is still valid
1232 * @tunnel: Tunnel to check
1233 */
1234bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
1235{
1236	int i;
1237
1238	for (i = 0; i < tunnel->npaths; i++) {
1239		WARN_ON(!tunnel->paths[i]->activated);
1240		if (tb_path_is_invalid(tunnel->paths[i]))
1241			return true;
1242	}
1243
1244	return false;
1245}
1246
1247/**
1248 * tb_tunnel_restart() - activate a tunnel after a hardware reset
1249 * @tunnel: Tunnel to restart
1250 *
1251 * Return: 0 on success and negative errno in case of failure
1252 */
1253int tb_tunnel_restart(struct tb_tunnel *tunnel)
1254{
1255	int res, i;
1256
1257	tb_tunnel_dbg(tunnel, "activating\n");
1258
1259	/*
1260	 * Make sure all paths are properly disabled before enabling
1261	 * them again.
1262	 */
1263	for (i = 0; i < tunnel->npaths; i++) {
1264		if (tunnel->paths[i]->activated) {
1265			tb_path_deactivate(tunnel->paths[i]);
1266			tunnel->paths[i]->activated = false;
1267		}
1268	}
1269
1270	if (tunnel->init) {
1271		res = tunnel->init(tunnel);
1272		if (res)
1273			return res;
1274	}
1275
1276	for (i = 0; i < tunnel->npaths; i++) {
1277		res = tb_path_activate(tunnel->paths[i]);
1278		if (res)
1279			goto err;
1280	}
1281
1282	if (tunnel->activate) {
1283		res = tunnel->activate(tunnel, true);
1284		if (res)
1285			goto err;
1286	}
1287
1288	return 0;
1289
1290err:
1291	tb_tunnel_warn(tunnel, "activation failed\n");
1292	tb_tunnel_deactivate(tunnel);
1293	return res;
1294}
1295
1296/**
1297 * tb_tunnel_activate() - activate a tunnel
1298 * @tunnel: Tunnel to activate
1299 *
1300 * Return: Returns 0 on success or an error code on failure.
1301 */
1302int tb_tunnel_activate(struct tb_tunnel *tunnel)
1303{
1304	int i;
1305
1306	for (i = 0; i < tunnel->npaths; i++) {
1307		if (tunnel->paths[i]->activated) {
1308			tb_tunnel_WARN(tunnel,
1309				       "trying to activate an already activated tunnel\n");
1310			return -EINVAL;
1311		}
1312	}
1313
1314	return tb_tunnel_restart(tunnel);
1315}
1316
1317/**
1318 * tb_tunnel_deactivate() - deactivate a tunnel
1319 * @tunnel: Tunnel to deactivate
1320 */
1321void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
1322{
1323	int i;
1324
1325	tb_tunnel_dbg(tunnel, "deactivating\n");
1326
1327	if (tunnel->activate)
1328		tunnel->activate(tunnel, false);
1329
1330	for (i = 0; i < tunnel->npaths; i++) {
1331		if (tunnel->paths[i] && tunnel->paths[i]->activated)
1332			tb_path_deactivate(tunnel->paths[i]);
1333	}
1334}
1335
1336/**
1337 * tb_tunnel_port_on_path() - Does the tunnel go through port
1338 * @tunnel: Tunnel to check
1339 * @port: Port to check
1340 *
1341 * Returns true if @tunnel goes through @port (direction does not matter),
1342 * false otherwise.
1343 */
1344bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1345			    const struct tb_port *port)
1346{
1347	int i;
1348
1349	for (i = 0; i < tunnel->npaths; i++) {
1350		if (!tunnel->paths[i])
1351			continue;
1352
1353		if (tb_path_port_on_path(tunnel->paths[i], port))
1354			return true;
1355	}
1356
1357	return false;
1358}
1359
1360static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1361{
1362	int i;
1363
1364	for (i = 0; i < tunnel->npaths; i++) {
1365		if (!tunnel->paths[i])
1366			return false;
1367		if (!tunnel->paths[i]->activated)
1368			return false;
1369	}
1370
1371	return true;
1372}
1373
1374/**
1375 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1376 * @tunnel: Tunnel to check
1377 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
1378 *		 Can be %NULL.
1379 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
1380 *		   Can be %NULL.
1381 *
1382 * Stores the amount of isochronous bandwidth @tunnel consumes in
1383 * @consumed_up and @consumed_down. In case of success returns %0,
1384 * negative errno otherwise.
1385 */
1386int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1387				 int *consumed_down)
1388{
1389	int up_bw = 0, down_bw = 0;
1390
1391	if (!tb_tunnel_is_active(tunnel))
1392		goto out;
1393
1394	if (tunnel->consumed_bandwidth) {
1395		int ret;
1396
1397		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1398		if (ret)
1399			return ret;
1400
1401		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1402			      down_bw);
1403	}
1404
1405out:
1406	if (consumed_up)
1407		*consumed_up = up_bw;
1408	if (consumed_down)
1409		*consumed_down = down_bw;
1410
1411	return 0;
1412}
1413
1414/**
1415 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1416 * @tunnel: Tunnel whose unused bandwidth to release
1417 *
1418 * If tunnel supports dynamic bandwidth management (USB3 tunnels at the
1419 * moment), this function makes it release all the unused bandwidth.
1420 *
1421 * Returns %0 in case of success and negative errno otherwise.
1422 */
1423int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1424{
1425	if (!tb_tunnel_is_active(tunnel))
1426		return 0;
1427
1428	if (tunnel->release_unused_bandwidth) {
1429		int ret;
1430
1431		ret = tunnel->release_unused_bandwidth(tunnel);
1432		if (ret)
1433			return ret;
1434	}
1435
1436	return 0;
1437}
1438
1439/**
1440 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1441 * @tunnel: Tunnel reclaiming available bandwidth
1442 * @available_up: Available upstream bandwidth (in Mb/s)
1443 * @available_down: Available downstream bandwidth (in Mb/s)
1444 *
1445 * Reclaims bandwidth from @available_up and @available_down and updates
1446 * the variables accordingly (e.g. decreases both according to what was
1447 * reclaimed by the tunnel). If nothing was reclaimed the values are
1448 * kept as is.
1449 */
1450void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1451					   int *available_up,
1452					   int *available_down)
1453{
1454	if (!tb_tunnel_is_active(tunnel))
1455		return;
1456
1457	if (tunnel->reclaim_available_bandwidth)
1458		tunnel->reclaim_available_bandwidth(tunnel, available_up,
1459						    available_down);
1460}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - Tunneling support
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2019, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/slab.h>
  11#include <linux/list.h>
  12
  13#include "tunnel.h"
  14#include "tb.h"
  15
  16/* PCIe adapters always use HopID 8 for both directions */
  17#define TB_PCI_HOPID			8
  18
  19#define TB_PCI_PATH_DOWN		0
  20#define TB_PCI_PATH_UP			1
  21
  22/* USB3 adapters always use HopID 8 for both directions */
  23#define TB_USB3_HOPID			8
  24
  25#define TB_USB3_PATH_DOWN		0
  26#define TB_USB3_PATH_UP			1
  27
  28/* DP adapters use HopID 8 for AUX and 9 for Video */
  29#define TB_DP_AUX_TX_HOPID		8
  30#define TB_DP_AUX_RX_HOPID		8
  31#define TB_DP_VIDEO_HOPID		9
  32
  33#define TB_DP_VIDEO_PATH_OUT		0
  34#define TB_DP_AUX_PATH_OUT		1
  35#define TB_DP_AUX_PATH_IN		2
  36
  37/* Minimum number of credits needed for PCIe path */
  38#define TB_MIN_PCIE_CREDITS		6U
  39/*
  40 * Number of credits we try to allocate for each DMA path if not limited
  41 * by the host router baMaxHI.
  42 */
  43#define TB_DMA_CREDITS			14U
  44/* Minimum number of credits for DMA path */
  45#define TB_MIN_DMA_CREDITS		1U
  46
  47static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
  48
  49#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
  50	do {                                                            \
  51		struct tb_tunnel *__tunnel = (tunnel);                  \
  52		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
  53		      tb_route(__tunnel->src_port->sw),                 \
  54		      __tunnel->src_port->port,                         \
  55		      tb_route(__tunnel->dst_port->sw),                 \
  56		      __tunnel->dst_port->port,                         \
  57		      tb_tunnel_names[__tunnel->type],			\
  58		      ## arg);                                          \
  59	} while (0)
  60
  61#define tb_tunnel_WARN(tunnel, fmt, arg...) \
  62	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
  63#define tb_tunnel_warn(tunnel, fmt, arg...) \
  64	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
  65#define tb_tunnel_info(tunnel, fmt, arg...) \
  66	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
  67#define tb_tunnel_dbg(tunnel, fmt, arg...) \
  68	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
  69
  70static inline unsigned int tb_usable_credits(const struct tb_port *port)
  71{
  72	return port->total_credits - port->ctl_credits;
  73}
  74
  75/**
  76 * tb_available_credits() - Available credits for PCIe and DMA
  77 * @port: Lane adapter to check
  78 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
  79 *		    streams possible through this lane adapter
  80 */
  81static unsigned int tb_available_credits(const struct tb_port *port,
  82					 size_t *max_dp_streams)
  83{
  84	const struct tb_switch *sw = port->sw;
  85	int credits, usb3, pcie, spare;
  86	size_t ndp;
  87
  88	usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
  89	pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
  90
  91	if (tb_acpi_is_xdomain_allowed()) {
  92		spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS);
  93		/* Add some credits for potential second DMA tunnel */
  94		spare += TB_MIN_DMA_CREDITS;
  95	} else {
  96		spare = 0;
  97	}
  98
  99	credits = tb_usable_credits(port);
 100	if (tb_acpi_may_tunnel_dp()) {
 101		/*
 102		 * Maximum number of DP streams possible through the
 103		 * lane adapter.
 104		 */
 105		ndp = (credits - (usb3 + pcie + spare)) /
 106		      (sw->min_dp_aux_credits + sw->min_dp_main_credits);
 107	} else {
 108		ndp = 0;
 109	}
 110	credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
 111	credits -= usb3;
 112
 113	if (max_dp_streams)
 114		*max_dp_streams = ndp;
 115
 116	return credits > 0 ? credits : 0;
 117}
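
/*
 * Worked example (illustrative, made-up numbers): with 120 usable
 * credits on the lane adapter, 20 reserved for USB3, 30 for PCIe and
 * 14 + 1 = 15 spare for DMA tunneling, the budget left for DP is
 * 120 - 65 = 55 credits. If one DP stream needs 18 credits, ndp is
 * 55 / 18 = 3 streams, and the function returns
 * 120 - 3 * 18 - 20 = 46 credits for PCIe/DMA use.
 */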
 118
 119static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
 120					 enum tb_tunnel_type type)
 121{
 122	struct tb_tunnel *tunnel;
 123
 124	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
 125	if (!tunnel)
 126		return NULL;
 127
 128	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
 129	if (!tunnel->paths) {
 130		tb_tunnel_free(tunnel);
 131		return NULL;
 132	}
 133
 134	INIT_LIST_HEAD(&tunnel->list);
 135	tunnel->tb = tb;
 136	tunnel->npaths = npaths;
 137	tunnel->type = type;
 138
 139	return tunnel;
 140}
 141
 142static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
 143{
 144	int res;
 145
 146	res = tb_pci_port_enable(tunnel->src_port, activate);
 147	if (res)
 148		return res;
 149
 150	if (tb_port_is_pcie_up(tunnel->dst_port))
 151		return tb_pci_port_enable(tunnel->dst_port, activate);
 152
 153	return 0;
 154}
 155
 156static int tb_pci_init_credits(struct tb_path_hop *hop)
 157{
 158	struct tb_port *port = hop->in_port;
 159	struct tb_switch *sw = port->sw;
 160	unsigned int credits;
 161
 162	if (tb_port_use_credit_allocation(port)) {
 163		unsigned int available;
 164
 165		available = tb_available_credits(port, NULL);
 166		credits = min(sw->max_pcie_credits, available);
 167
 168		if (credits < TB_MIN_PCIE_CREDITS)
 169			return -ENOSPC;
 170
 171		credits = max(TB_MIN_PCIE_CREDITS, credits);
 172	} else {
 173		if (tb_port_is_null(port))
 174			credits = port->bonded ? 32 : 16;
 175		else
 176			credits = 7;
 177	}
 178
 179	hop->initial_credits = credits;
 180	return 0;
 181}
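
/*
 * Example (illustrative): on a router that supports credit allocation,
 * a hop whose ingress adapter still has 40 credits available and whose
 * router caps PCIe at 32 credits gets min(32, 40) = 32; if fewer than
 * TB_MIN_PCIE_CREDITS (6) were left the path setup would fail with
 * -ENOSPC. On routers without credit allocation the hop simply gets 32
 * credits on a bonded lane adapter, 16 on an unbonded one and 7 on any
 * other adapter type.
 */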
 182
 183static int tb_pci_init_path(struct tb_path *path)
 184{
 185	struct tb_path_hop *hop;
 186
 187	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 188	path->egress_shared_buffer = TB_PATH_NONE;
 189	path->ingress_fc_enable = TB_PATH_ALL;
 190	path->ingress_shared_buffer = TB_PATH_NONE;
 191	path->priority = 3;
 192	path->weight = 1;
 193	path->drop_packages = 0;
 194
 195	tb_path_for_each_hop(path, hop) {
 196		int ret;
 197
 198		ret = tb_pci_init_credits(hop);
 199		if (ret)
 200			return ret;
 201	}
 202
 203	return 0;
 204}
 205
 206/**
 207 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 208 * @tb: Pointer to the domain structure
 209 * @down: PCIe downstream adapter
 210 *
 211 * If @down adapter is active, follows the tunnel to the PCIe upstream
 212 * adapter and back. Returns the discovered tunnel or %NULL if there was
 213 * no tunnel.
 214 */
 215struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
 216{
 217	struct tb_tunnel *tunnel;
 218	struct tb_path *path;
 219
 220	if (!tb_pci_port_is_enabled(down))
 221		return NULL;
 222
 223	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 224	if (!tunnel)
 225		return NULL;
 226
 227	tunnel->activate = tb_pci_activate;
 228	tunnel->src_port = down;
 229
 230	/*
 231	 * Discover both paths even if they are not complete. We will
 232	 * clean them up by calling tb_tunnel_deactivate() below in that
 233	 * case.
 234	 */
 235	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
 236				&tunnel->dst_port, "PCIe Up");
 237	if (!path) {
 238		/* Just disable the downstream port */
 239		tb_pci_port_enable(down, false);
 240		goto err_free;
 241	}
 242	tunnel->paths[TB_PCI_PATH_UP] = path;
 243	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
 244		goto err_free;
 245
 246	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
 247				"PCIe Down");
 248	if (!path)
 249		goto err_deactivate;
 250	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 251	if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
 252		goto err_deactivate;
 253
 254	/* Validate that the tunnel is complete */
 255	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
 256		tb_port_warn(tunnel->dst_port,
 257			     "path does not end on a PCIe adapter, cleaning up\n");
 258		goto err_deactivate;
 259	}
 260
 261	if (down != tunnel->src_port) {
 262		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
 263		goto err_deactivate;
 264	}
 265
 266	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
 267		tb_tunnel_warn(tunnel,
 268			       "tunnel is not fully activated, cleaning up\n");
 269		goto err_deactivate;
 270	}
 271
 272	tb_tunnel_dbg(tunnel, "discovered\n");
 273	return tunnel;
 274
 275err_deactivate:
 276	tb_tunnel_deactivate(tunnel);
 277err_free:
 278	tb_tunnel_free(tunnel);
 279
 280	return NULL;
 281}
 282
 283/**
 284 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 285 * @tb: Pointer to the domain structure
 286 * @up: PCIe upstream adapter port
 287 * @down: PCIe downstream adapter port
 288 *
 289 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 290 * TB_TYPE_PCIE_DOWN.
 291 *
 292 * Return: Returns a tb_tunnel on success or NULL on failure.
 293 */
 294struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 295				      struct tb_port *down)
 296{
 297	struct tb_tunnel *tunnel;
 298	struct tb_path *path;
 299
 300	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
 301	if (!tunnel)
 302		return NULL;
 303
 304	tunnel->activate = tb_pci_activate;
 305	tunnel->src_port = down;
 306	tunnel->dst_port = up;
 307
 308	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
 309			     "PCIe Down");
 310	if (!path)
 311		goto err_free;
 312	tunnel->paths[TB_PCI_PATH_DOWN] = path;
 313	if (tb_pci_init_path(path))
 314		goto err_free;
 315
 316	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
 317			     "PCIe Up");
 318	if (!path)
 319		goto err_free;
 320	tunnel->paths[TB_PCI_PATH_UP] = path;
 321	if (tb_pci_init_path(path))
 322		goto err_free;
 323
 324	return tunnel;
 325
 326err_free:
 327	tb_tunnel_free(tunnel);
 328	return NULL;
 329}
 330
 331static bool tb_dp_is_usb4(const struct tb_switch *sw)
 332{
 333	/* Titan Ridge DP adapters need the same treatment as USB4 */
 334	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
 335}
 336
 337static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
 338{
 339	int timeout = 10;
 340	u32 val;
 341	int ret;
 342
 343	/* Both ends need to support this */
 344	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
 345		return 0;
 346
 347	ret = tb_port_read(out, &val, TB_CFG_PORT,
 348			   out->cap_adap + DP_STATUS_CTRL, 1);
 349	if (ret)
 350		return ret;
 351
 352	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
 353
 354	ret = tb_port_write(out, &val, TB_CFG_PORT,
 355			    out->cap_adap + DP_STATUS_CTRL, 1);
 356	if (ret)
 357		return ret;
 358
 359	do {
 360		ret = tb_port_read(out, &val, TB_CFG_PORT,
 361				   out->cap_adap + DP_STATUS_CTRL, 1);
 362		if (ret)
 363			return ret;
 364		if (!(val & DP_STATUS_CTRL_CMHS))
 365			return 0;
 366		usleep_range(10, 100);
 367	} while (timeout--);
 368
 369	return -ETIMEDOUT;
 370}
 371
 372static inline u32 tb_dp_cap_get_rate(u32 val)
 373{
 374	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
 375
 376	switch (rate) {
 377	case DP_COMMON_CAP_RATE_RBR:
 378		return 1620;
 379	case DP_COMMON_CAP_RATE_HBR:
 380		return 2700;
 381	case DP_COMMON_CAP_RATE_HBR2:
 382		return 5400;
 383	case DP_COMMON_CAP_RATE_HBR3:
 384		return 8100;
 385	default:
 386		return 0;
 387	}
 388}
 389
 390static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
 391{
 392	val &= ~DP_COMMON_CAP_RATE_MASK;
 393	switch (rate) {
 394	default:
 395		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
 396		fallthrough;
 397	case 1620:
 398		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
 399		break;
 400	case 2700:
 401		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
 402		break;
 403	case 5400:
 404		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
 405		break;
 406	case 8100:
 407		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
 408		break;
 409	}
 410	return val;
 411}
 412
 413static inline u32 tb_dp_cap_get_lanes(u32 val)
 414{
 415	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
 416
 417	switch (lanes) {
 418	case DP_COMMON_CAP_1_LANE:
 419		return 1;
 420	case DP_COMMON_CAP_2_LANES:
 421		return 2;
 422	case DP_COMMON_CAP_4_LANES:
 423		return 4;
 424	default:
 425		return 0;
 426	}
 427}
 428
 429static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
 430{
 431	val &= ~DP_COMMON_CAP_LANES_MASK;
 432	switch (lanes) {
 433	default:
 434		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
 435		     lanes);
 436		fallthrough;
 437	case 1:
 438		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
 439		break;
 440	case 2:
 441		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
 442		break;
 443	case 4:
 444		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
 445		break;
 446	}
 447	return val;
 448}
 449
 450static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
 451{
 452	/* Tunneling removes the DP 8b/10b encoding */
 453	return rate * lanes * 8 / 10;
 454}
 455
 456static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
 457				  u32 out_rate, u32 out_lanes, u32 *new_rate,
 458				  u32 *new_lanes)
 459{
 460	static const u32 dp_bw[][2] = {
 461		/* Mb/s, lanes */
 462		{ 8100, 4 }, /* 25920 Mb/s */
 463		{ 5400, 4 }, /* 17280 Mb/s */
 464		{ 8100, 2 }, /* 12960 Mb/s */
 465		{ 2700, 4 }, /* 8640 Mb/s */
 466		{ 5400, 2 }, /* 8640 Mb/s */
 467		{ 8100, 1 }, /* 6480 Mb/s */
 468		{ 1620, 4 }, /* 5184 Mb/s */
 469		{ 5400, 1 }, /* 4320 Mb/s */
 470		{ 2700, 2 }, /* 4320 Mb/s */
 471		{ 1620, 2 }, /* 2592 Mb/s */
 472		{ 2700, 1 }, /* 2160 Mb/s */
 473		{ 1620, 1 }, /* 1296 Mb/s */
 474	};
 475	unsigned int i;
 476
 477	/*
 478	 * Find a combination that can fit into max_bw and does not
 479	 * exceed the maximum rate and lanes supported by the DP OUT and
 480	 * DP IN adapters.
 481	 */
 482	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
 483		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
 484			continue;
 485
 486		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
 487			continue;
 488
 489		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
 490			*new_rate = dp_bw[i][0];
 491			*new_lanes = dp_bw[i][1];
 492			return 0;
 493		}
 494	}
 495
 496	return -ENOSR;
 497}
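
/*
 * A minimal sketch of how the table walk behaves (hypothetical helper,
 * not used by the driver): the DP IN supports HBR3 x4, the DP OUT only
 * HBR2 x2, and the tunnel has 4000 Mb/s available, so the first entry
 * that fits both adapters and the budget is { 1620, 2 } (2592 Mb/s).
 */
static inline void tb_dp_reduce_bandwidth_example(void)
{
	u32 rate, lanes;

	if (!tb_dp_reduce_bandwidth(4000, 8100, 4, 5400, 2, &rate, &lanes))
		pr_debug("reduced to %u Mb/s x%u\n", rate, lanes);
}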
 498
 499static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
 500{
 501	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
 502	struct tb_port *out = tunnel->dst_port;
 503	struct tb_port *in = tunnel->src_port;
 504	int ret, max_bw;
 505
 506	/*
 507	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
 508	 * newer generation hardware.
 509	 */
 510	if (in->sw->generation < 2 || out->sw->generation < 2)
 511		return 0;
 512
 513	/*
 514	 * Perform connection manager handshake between IN and OUT ports
 515	 * before capabilities exchange can take place.
 516	 */
 517	ret = tb_dp_cm_handshake(in, out);
 518	if (ret)
 519		return ret;
 520
 521	/* Read both DP_LOCAL_CAP registers */
 522	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
 523			   in->cap_adap + DP_LOCAL_CAP, 1);
 524	if (ret)
 525		return ret;
 526
 527	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
 528			   out->cap_adap + DP_LOCAL_CAP, 1);
 529	if (ret)
 530		return ret;
 531
 532	/* Write IN local caps to OUT remote caps */
 533	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
 534			    out->cap_adap + DP_REMOTE_CAP, 1);
 535	if (ret)
 536		return ret;
 537
 538	in_rate = tb_dp_cap_get_rate(in_dp_cap);
 539	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
 540	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 541		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
 542
 543	/*
 544	 * If the tunnel bandwidth is limited (max_bw is set) then see
 545	 * if we need to reduce bandwidth to fit there.
 546	 */
 547	out_rate = tb_dp_cap_get_rate(out_dp_cap);
 548	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
 549	bw = tb_dp_bandwidth(out_rate, out_lanes);
 550	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 551		    out_rate, out_lanes, bw);
 552
 553	if (in->sw->config.depth < out->sw->config.depth)
 554		max_bw = tunnel->max_down;
 555	else
 556		max_bw = tunnel->max_up;
 557
 558	if (max_bw && bw > max_bw) {
 559		u32 new_rate, new_lanes, new_bw;
 560
 561		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
 562					     out_rate, out_lanes, &new_rate,
 563					     &new_lanes);
 564		if (ret) {
 565			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
 566			return ret;
 567		}
 568
 569		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
 570		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
 571			    new_rate, new_lanes, new_bw);
 572
 573		/*
 574		 * Set new rate and number of lanes before writing it to
 575		 * the IN port remote caps.
 576		 */
 577		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
 578		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
 579	}
 580
 581	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
 582			     in->cap_adap + DP_REMOTE_CAP, 1);
 583}
 584
 585static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 586{
 587	int ret;
 588
 589	if (active) {
 590		struct tb_path **paths;
 591		int last;
 592
 593		paths = tunnel->paths;
 594		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
 595
 596		tb_dp_port_set_hops(tunnel->src_port,
 597			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
 598			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
 599			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
 600
 601		tb_dp_port_set_hops(tunnel->dst_port,
 602			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
 603			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
 604			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
 605	} else {
 606		tb_dp_port_hpd_clear(tunnel->src_port);
 607		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
 608		if (tb_port_is_dpout(tunnel->dst_port))
 609			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
 610	}
 611
 612	ret = tb_dp_port_enable(tunnel->src_port, active);
 613	if (ret)
 614		return ret;
 615
 616	if (tb_port_is_dpout(tunnel->dst_port))
 617		return tb_dp_port_enable(tunnel->dst_port, active);
 618
 619	return 0;
 620}
 621
 622static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 623				    int *consumed_down)
 624{
 625	struct tb_port *in = tunnel->src_port;
 626	const struct tb_switch *sw = in->sw;
 627	u32 val, rate = 0, lanes = 0;
 628	int ret;
 629
 630	if (tb_dp_is_usb4(sw)) {
 631		int timeout = 20;
 632
 633		/*
 634		 * Wait for DPRX done. Normally it should be already set
 635		 * for active tunnel.
 636		 * for an active tunnel.
 637		do {
 638			ret = tb_port_read(in, &val, TB_CFG_PORT,
 639					   in->cap_adap + DP_COMMON_CAP, 1);
 640			if (ret)
 641				return ret;
 642
 643			if (val & DP_COMMON_CAP_DPRX_DONE) {
 644				rate = tb_dp_cap_get_rate(val);
 645				lanes = tb_dp_cap_get_lanes(val);
 646				break;
 647			}
 648			msleep(250);
 649		} while (timeout--);
 650
 651		if (timeout < 0)
 652			return -ETIMEDOUT;
 653	} else if (sw->generation >= 2) {
 654		/*
 655		 * Read from the copied remote cap so that we take into
 656		 * account if capabilities were reduced during exchange.
 657		 */
 658		ret = tb_port_read(in, &val, TB_CFG_PORT,
 659				   in->cap_adap + DP_REMOTE_CAP, 1);
 660		if (ret)
 661			return ret;
 662
 663		rate = tb_dp_cap_get_rate(val);
 664		lanes = tb_dp_cap_get_lanes(val);
 665	} else {
 666		/* No bandwidth management for legacy devices */
 667		*consumed_up = 0;
 668		*consumed_down = 0;
 669		return 0;
 670	}
 671
 672	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
 673		*consumed_up = 0;
 674		*consumed_down = tb_dp_bandwidth(rate, lanes);
 675	} else {
 676		*consumed_up = tb_dp_bandwidth(rate, lanes);
 677		*consumed_down = 0;
 678	}
 679
 680	return 0;
 681}
 682
 683static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
 684{
 685	struct tb_port *port = hop->in_port;
 686	struct tb_switch *sw = port->sw;
 687
 688	if (tb_port_use_credit_allocation(port))
 689		hop->initial_credits = sw->min_dp_aux_credits;
 690	else
 691		hop->initial_credits = 1;
 692}
 693
 694static void tb_dp_init_aux_path(struct tb_path *path)
 695{
 696	struct tb_path_hop *hop;
 697
 698	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 699	path->egress_shared_buffer = TB_PATH_NONE;
 700	path->ingress_fc_enable = TB_PATH_ALL;
 701	path->ingress_shared_buffer = TB_PATH_NONE;
 702	path->priority = 2;
 703	path->weight = 1;
 704
 705	tb_path_for_each_hop(path, hop)
 706		tb_dp_init_aux_credits(hop);
 707}
 708
 709static int tb_dp_init_video_credits(struct tb_path_hop *hop)
 710{
 711	struct tb_port *port = hop->in_port;
 712	struct tb_switch *sw = port->sw;
 713
 714	if (tb_port_use_credit_allocation(port)) {
 715		unsigned int nfc_credits;
 716		size_t max_dp_streams;
 717
 718		tb_available_credits(port, &max_dp_streams);
 719		/*
 720		 * Read the number of currently allocated NFC credits
 721		 * from the lane adapter. Since we only use them for DP
 722		 * tunneling we can use that to figure out how many DP
 723		 * tunnels already go through the lane adapter.
 724		 */
 725		nfc_credits = port->config.nfc_credits &
 726				ADP_CS_4_NFC_BUFFERS_MASK;
 727		if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
 728			return -ENOSPC;
 729
 730		hop->nfc_credits = sw->min_dp_main_credits;
 731	} else {
 732		hop->nfc_credits = min(port->total_credits - 2, 12U);
 733	}
 734
 735	return 0;
 736}
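
/*
 * For example (hypothetical numbers): with min_dp_main_credits of 18,
 * reading 54 allocated NFC credits from the lane adapter means three
 * DP tunnels already run through it. If the available buffer space
 * only supports two DP streams, tb_dp_init_video_credits() fails with
 * -ENOSPC; otherwise the hop gets another 18 non-flow-controlled
 * credits.
 */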
 737
 738static int tb_dp_init_video_path(struct tb_path *path)
 739{
 740	struct tb_path_hop *hop;
 741
 742	path->egress_fc_enable = TB_PATH_NONE;
 743	path->egress_shared_buffer = TB_PATH_NONE;
 744	path->ingress_fc_enable = TB_PATH_NONE;
 745	path->ingress_shared_buffer = TB_PATH_NONE;
 746	path->priority = 1;
 747	path->weight = 1;
 748
 749	tb_path_for_each_hop(path, hop) {
 750		int ret;
 751
 752		ret = tb_dp_init_video_credits(hop);
 753		if (ret)
 754			return ret;
 755	}
 756
 757	return 0;
 758}
 759
 760/**
 761 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 762 * @tb: Pointer to the domain structure
 763 * @in: DP in adapter
 764 *
 765 * If @in adapter is active, follows the tunnel to the DP out adapter
 766 * and back. Returns the discovered tunnel or %NULL if there was no
 767 * tunnel.
 768 *
 769 * Return: DP tunnel or %NULL if no tunnel found.
 770 */
 771struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
 772{
 773	struct tb_tunnel *tunnel;
 774	struct tb_port *port;
 775	struct tb_path *path;
 776
 777	if (!tb_dp_port_is_enabled(in))
 778		return NULL;
 779
 780	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
 781	if (!tunnel)
 782		return NULL;
 783
 784	tunnel->init = tb_dp_xchg_caps;
 785	tunnel->activate = tb_dp_activate;
 786	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
 787	tunnel->src_port = in;
 788
 789	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
 790				&tunnel->dst_port, "Video");
 791	if (!path) {
 792		/* Just disable the DP IN port */
 793		tb_dp_port_enable(in, false);
 794		goto err_free;
 795	}
 796	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
 797	if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
 798		goto err_free;
 799
 800	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
 801	if (!path)
 802		goto err_deactivate;
 803	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
 804	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
 805
 806	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
 807				&port, "AUX RX");
 808	if (!path)
 809		goto err_deactivate;
 810	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
 811	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
 812
 813	/* Validate that the tunnel is complete */
 814	if (!tb_port_is_dpout(tunnel->dst_port)) {
 815		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
 816		goto err_deactivate;
 817	}
 818
 819	if (!tb_dp_port_is_enabled(tunnel->dst_port))
 820		goto err_deactivate;
 821
 822	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
 823		goto err_deactivate;
 824
 825	if (port != tunnel->src_port) {
 826		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
 827		goto err_deactivate;
 828	}
 829
 830	tb_tunnel_dbg(tunnel, "discovered\n");
 831	return tunnel;
 832
 833err_deactivate:
 834	tb_tunnel_deactivate(tunnel);
 835err_free:
 836	tb_tunnel_free(tunnel);
 837
 838	return NULL;
 839}
 840
 841/**
 842 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 843 * @tb: Pointer to the domain structure
 844 * @in: DP in adapter port
 845 * @out: DP out adapter port
 846 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 847 *	    if not limited)
 848 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 849 *	      (%0 if not limited)
 850 *
 851 * Allocates a tunnel between @in and @out that is capable of tunneling
 852 * Display Port traffic.
 853 *
 854 * Return: Returns a tb_tunnel on success or %NULL on failure.
 855 */
 856struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 857				     struct tb_port *out, int max_up,
 858				     int max_down)
 859{
 860	struct tb_tunnel *tunnel;
 861	struct tb_path **paths;
 862	struct tb_path *path;
 863
 864	if (WARN_ON(!in->cap_adap || !out->cap_adap))
 865		return NULL;
 866
 867	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
 868	if (!tunnel)
 869		return NULL;
 870
 871	tunnel->init = tb_dp_xchg_caps;
 872	tunnel->activate = tb_dp_activate;
 873	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
 874	tunnel->src_port = in;
 875	tunnel->dst_port = out;
 876	tunnel->max_up = max_up;
 877	tunnel->max_down = max_down;
 878
 879	paths = tunnel->paths;
 880
 881	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
 882			     1, "Video");
 883	if (!path)
 884		goto err_free;
 885	tb_dp_init_video_path(path);
 886	paths[TB_DP_VIDEO_PATH_OUT] = path;
 887
 888	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
 889			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
 890	if (!path)
 891		goto err_free;
 892	tb_dp_init_aux_path(path);
 893	paths[TB_DP_AUX_PATH_OUT] = path;
 894
 895	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
 896			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
 897	if (!path)
 898		goto err_free;
 899	tb_dp_init_aux_path(path);
 900	paths[TB_DP_AUX_PATH_IN] = path;
 901
 902	return tunnel;
 903
 904err_free:
 905	tb_tunnel_free(tunnel);
 906	return NULL;
 907}
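
/*
 * A minimal lifecycle sketch (hypothetical helper, not part of the
 * driver): @in and @out are assumed to be valid DP IN/OUT adapters and
 * the bandwidth is left unlimited. The connection manager in tb.c
 * drives the real sequence, including bandwidth checks and tunnel
 * bookkeeping.
 */
static inline int tb_dp_tunnel_example(struct tb *tb, struct tb_port *in,
				       struct tb_port *out)
{
	struct tb_tunnel *tunnel;
	int ret;

	/* max_up/max_down of 0 means the tunnel bandwidth is not limited */
	tunnel = tb_tunnel_alloc_dp(tb, in, out, 0, 0);
	if (!tunnel)
		return -ENOMEM;

	/* Runs tb_dp_xchg_caps(), activates the paths and the adapters */
	ret = tb_tunnel_activate(tunnel);
	if (ret)
		tb_tunnel_free(tunnel);

	return ret;
}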
 908
 909static unsigned int tb_dma_available_credits(const struct tb_port *port)
 910{
 911	const struct tb_switch *sw = port->sw;
 912	int credits;
 913
 914	credits = tb_available_credits(port, NULL);
 915	if (tb_acpi_may_tunnel_pcie())
 916		credits -= sw->max_pcie_credits;
 917	credits -= port->dma_credits;
 918
 919	return credits > 0 ? credits : 0;
 920}
 921
 922static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
 923{
 924	struct tb_port *port = hop->in_port;
 925
 926	if (tb_port_use_credit_allocation(port)) {
 927		unsigned int available = tb_dma_available_credits(port);
 928
 929		/*
 930		 * Need to have at least TB_MIN_DMA_CREDITS, otherwise
 931		 * DMA path cannot be established.
 932		 */
 933		if (available < TB_MIN_DMA_CREDITS)
 934			return -ENOSPC;
 935
 936		while (credits > available)
 937			credits--;
 938
 939		tb_port_dbg(port, "reserving %u credits for DMA path\n",
 940			    credits);
 941
 942		port->dma_credits += credits;
 943	} else {
 944		if (tb_port_is_null(port))
 945			credits = port->bonded ? 14 : 6;
 946		else
 947			credits = min(port->total_credits, credits);
 948	}
 949
 950	hop->initial_credits = credits;
 951	return 0;
 952}
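
/*
 * For example (hypothetical numbers): if a hop asks for 60 credits but
 * only 20 are still unreserved on its in_port, the hop is set up with
 * 20 credits and port->dma_credits grows by 20. If fewer than
 * TB_MIN_DMA_CREDITS are left, the DMA path cannot be established at
 * all. On switches without credit allocation the request is simply
 * clamped: 14 or 6 credits on bonded/unbonded lane adapters, otherwise
 * at most port->total_credits.
 */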
 953
 954/* Path from lane adapter to NHI */
 955static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
 956{
 957	struct tb_path_hop *hop;
 958	unsigned int i, tmp;
 959
 960	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
 961	path->ingress_fc_enable = TB_PATH_ALL;
 962	path->egress_shared_buffer = TB_PATH_NONE;
 963	path->ingress_shared_buffer = TB_PATH_NONE;
 964	path->priority = 5;
 965	path->weight = 1;
 966	path->clear_fc = true;
 967
 968	/*
 969	 * First lane adapter is the one connected to the remote host.
 970	 * We don't tunnel other traffic over this link so we can use all
 971	 * the credits (except the ones reserved for control traffic).
 972	 */
 973	hop = &path->hops[0];
 974	tmp = min(tb_usable_credits(hop->in_port), credits);
 975	hop->initial_credits = tmp;
 976	hop->in_port->dma_credits += tmp;
 977
 978	for (i = 1; i < path->path_length; i++) {
 979		int ret;
 980
 981		ret = tb_dma_reserve_credits(&path->hops[i], credits);
 982		if (ret)
 983			return ret;
 984	}
 985
 986	return 0;
 987}
 988
 989/* Path from NHI to lane adapter */
 990static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
 991{
 992	struct tb_path_hop *hop;
 993
 994	path->egress_fc_enable = TB_PATH_ALL;
 995	path->ingress_fc_enable = TB_PATH_ALL;
 996	path->egress_shared_buffer = TB_PATH_NONE;
 997	path->ingress_shared_buffer = TB_PATH_NONE;
 998	path->priority = 5;
 999	path->weight = 1;
1000	path->clear_fc = true;
1001
1002	tb_path_for_each_hop(path, hop) {
1003		int ret;
1004
1005		ret = tb_dma_reserve_credits(hop, credits);
1006		if (ret)
1007			return ret;
1008	}
1009
1010	return 0;
1011}
1012
1013static void tb_dma_release_credits(struct tb_path_hop *hop)
1014{
1015	struct tb_port *port = hop->in_port;
1016
1017	if (tb_port_use_credit_allocation(port)) {
1018		port->dma_credits -= hop->initial_credits;
1019
1020		tb_port_dbg(port, "released %u DMA path credits\n",
1021			    hop->initial_credits);
1022	}
1023}
1024
1025static void tb_dma_deinit_path(struct tb_path *path)
1026{
1027	struct tb_path_hop *hop;
1028
1029	tb_path_for_each_hop(path, hop)
1030		tb_dma_release_credits(hop);
1031}
1032
1033static void tb_dma_deinit(struct tb_tunnel *tunnel)
1034{
1035	int i;
1036
1037	for (i = 0; i < tunnel->npaths; i++) {
1038		if (!tunnel->paths[i])
1039			continue;
1040		tb_dma_deinit_path(tunnel->paths[i]);
1041	}
1042}
1043
1044/**
1045 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1046 * @tb: Pointer to the domain structure
1047 * @nhi: Host controller port
1048 * @dst: Destination null port which the other domain is connected to
1049 * @transmit_path: HopID used for transmitting packets
1050 * @transmit_ring: NHI ring number used to send packets towards the
1051 *		   other domain. Set to %-1 if TX path is not needed.
1052 * @receive_path: HopID used for receiving packets
1053 * @receive_ring: NHI ring number used to receive packets from the
1054 *		  other domain. Set to %-1 if RX path is not needed.
1055 *
1056 * Return: Returns a tb_tunnel on success or %NULL on failure.
1057 */
1058struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
1059				      struct tb_port *dst, int transmit_path,
1060				      int transmit_ring, int receive_path,
1061				      int receive_ring)
1062{
1063	struct tb_tunnel *tunnel;
1064	size_t npaths = 0, i = 0;
1065	struct tb_path *path;
1066	int credits;
1067
1068	if (receive_ring > 0)
1069		npaths++;
1070	if (transmit_ring > 0)
1071		npaths++;
1072
1073	if (WARN_ON(!npaths))
1074		return NULL;
1075
1076	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
1077	if (!tunnel)
1078		return NULL;
1079
1080	tunnel->src_port = nhi;
1081	tunnel->dst_port = dst;
1082	tunnel->deinit = tb_dma_deinit;
1083
1084	credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);
1085
1086	if (receive_ring > 0) {
1087		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
1088				     "DMA RX");
1089		if (!path)
1090			goto err_free;
1091		tunnel->paths[i++] = path;
1092		if (tb_dma_init_rx_path(path, credits)) {
1093			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
1094			goto err_free;
1095		}
1096	}
1097
1098	if (transmit_ring > 0) {
1099		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
1100				     "DMA TX");
1101		if (!path)
1102			goto err_free;
1103		tunnel->paths[i++] = path;
1104		if (tb_dma_init_tx_path(path, credits)) {
1105			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
1106			goto err_free;
1107		}
1108	}
1109
1110	return tunnel;
1111
1112err_free:
1113	tb_tunnel_free(tunnel);
1114	return NULL;
1115}
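
/*
 * A minimal sketch of setting up a bidirectional DMA tunnel towards a
 * remote host (hypothetical helper, not part of the driver): @nhi is
 * the local NHI port and @dst the lane adapter of the XDomain link.
 * The HopID and ring numbers are illustrative; the real values come
 * from the XDomain negotiation.
 */
static inline struct tb_tunnel *tb_dma_tunnel_example(struct tb *tb,
						      struct tb_port *nhi,
						      struct tb_port *dst)
{
	struct tb_tunnel *tunnel;

	/* transmit path/ring 8/1 and receive path/ring 8/1 */
	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 1);
	if (!tunnel)
		return NULL;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	return tunnel;
}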
1116
1117/**
1118 * tb_tunnel_match_dma() - Match DMA tunnel
1119 * @tunnel: Tunnel to match
1120 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1121 * @transmit_ring: NHI ring number used to send packets towards the
1122 *		   other domain. Pass %-1 to ignore.
1123 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1124 * @receive_ring: NHI ring number used to receive packets from the
1125 *		  other domain. Pass %-1 to ignore.
1126 *
1127 * This function can be used to match a specific DMA tunnel, if there are
1128 * multiple DMA tunnels going through the same XDomain connection.
1129 * Returns true if there is a match and false otherwise.
1130 */
1131bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
1132			 int transmit_ring, int receive_path, int receive_ring)
1133{
1134	const struct tb_path *tx_path = NULL, *rx_path = NULL;
1135	int i;
1136
1137	if (!receive_ring || !transmit_ring)
1138		return false;
1139
1140	for (i = 0; i < tunnel->npaths; i++) {
1141		const struct tb_path *path = tunnel->paths[i];
1142
1143		if (!path)
1144			continue;
1145
1146		if (tb_port_is_nhi(path->hops[0].in_port))
1147			tx_path = path;
1148		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
1149			rx_path = path;
1150	}
1151
1152	if (transmit_ring > 0 || transmit_path > 0) {
1153		if (!tx_path)
1154			return false;
1155		if (transmit_ring > 0 &&
1156		    (tx_path->hops[0].in_hop_index != transmit_ring))
1157			return false;
1158		if (transmit_path > 0 &&
1159		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
1160			return false;
1161	}
1162
1163	if (receive_ring > 0 || receive_path > 0) {
1164		if (!rx_path)
1165			return false;
1166		if (receive_path > 0 &&
1167		    (rx_path->hops[0].in_hop_index != receive_path))
1168			return false;
1169		if (receive_ring > 0 &&
1170		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
1171			return false;
1172	}
1173
1174	return true;
1175}
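
/*
 * For example (illustrative values): a tunnel created with transmit
 * path/ring 8/1 and receive path/ring 8/1 matches both
 * tb_tunnel_match_dma(tunnel, 8, 1, 8, 1) and the wildcard form
 * tb_tunnel_match_dma(tunnel, -1, 1, -1, -1), but not
 * tb_tunnel_match_dma(tunnel, 8, 2, 8, 1).
 */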
1176
1177static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
1178{
1179	int ret, up_max_rate, down_max_rate;
1180
1181	ret = usb4_usb3_port_max_link_rate(up);
1182	if (ret < 0)
1183		return ret;
1184	up_max_rate = ret;
1185
1186	ret = usb4_usb3_port_max_link_rate(down);
1187	if (ret < 0)
1188		return ret;
1189	down_max_rate = ret;
1190
1191	return min(up_max_rate, down_max_rate);
1192}
1193
1194static int tb_usb3_init(struct tb_tunnel *tunnel)
1195{
1196	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
1197		      tunnel->allocated_up, tunnel->allocated_down);
1198
1199	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
1200						 &tunnel->allocated_up,
1201						 &tunnel->allocated_down);
1202}
1203
1204static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
1205{
1206	int res;
1207
1208	res = tb_usb3_port_enable(tunnel->src_port, activate);
1209	if (res)
1210		return res;
1211
1212	if (tb_port_is_usb3_up(tunnel->dst_port))
1213		return tb_usb3_port_enable(tunnel->dst_port, activate);
1214
1215	return 0;
1216}
1217
1218static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
1219		int *consumed_up, int *consumed_down)
1220{
1221	int pcie_enabled = tb_acpi_may_tunnel_pcie();
1222
1223	/*
1224	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
1225	 * take it into account here.
1226	 */
1227	*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
1228	*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
1229	return 0;
1230}
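
/*
 * Worked example (hypothetical numbers): with 900 Mb/s allocated in
 * each direction and PCIe tunneling enabled, the reported consumption
 * is 900 * (3 + 1) / 3 = 1200 Mb/s per direction; with PCIe tunneling
 * disabled it stays at 900 * 3 / 3 = 900 Mb/s.
 */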
1231
1232static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
1233{
1234	int ret;
1235
1236	ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
1237					       &tunnel->allocated_up,
1238					       &tunnel->allocated_down);
1239	if (ret)
1240		return ret;
1241
1242	tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
1243		      tunnel->allocated_up, tunnel->allocated_down);
1244	return 0;
1245}
1246
1247static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1248						int *available_up,
1249						int *available_down)
1250{
1251	int ret, max_rate, allocate_up, allocate_down;
1252
1253	ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
1254	if (ret < 0) {
1255		tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
1256		return;
1257	} else if (!ret) {
1258		/* Use the maximum link rate if the link valid bit is not set */
1259		ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
1260		if (ret < 0) {
1261			tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
1262			return;
1263		}
1264	}
1265
1266	/*
1267	 * 90% of the max rate can be allocated for isochronous
1268	 * transfers.
1269	 */
1270	max_rate = ret * 90 / 100;
1271
1272	/* No need to reclaim if already at maximum */
1273	if (tunnel->allocated_up >= max_rate &&
1274	    tunnel->allocated_down >= max_rate)
1275		return;
1276
1277	/* Don't go lower than what is already allocated */
1278	allocate_up = min(max_rate, *available_up);
1279	if (allocate_up < tunnel->allocated_up)
1280		allocate_up = tunnel->allocated_up;
1281
1282	allocate_down = min(max_rate, *available_down);
1283	if (allocate_down < tunnel->allocated_down)
1284		allocate_down = tunnel->allocated_down;
1285
1286	/* If no changes no need to do more */
1287	if (allocate_up == tunnel->allocated_up &&
1288	    allocate_down == tunnel->allocated_down)
1289		return;
1290
1291	ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
1292						&allocate_down);
1293	if (ret) {
1294		tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
1295		return;
1296	}
1297
1298	tunnel->allocated_up = allocate_up;
1299	*available_up -= tunnel->allocated_up;
1300
1301	tunnel->allocated_down = allocate_down;
1302	*available_down -= tunnel->allocated_down;
1303
1304	tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
1305		      tunnel->allocated_up, tunnel->allocated_down);
1306}
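
/*
 * Worked example (hypothetical numbers): on a 10000 Mb/s link at most
 * 90%, i.e. 9000 Mb/s, may be allocated to USB3. If the tunnel
 * currently holds 2000/2000 Mb/s and 5000/3000 Mb/s is still
 * available, it grows to min(9000, 5000) = 5000 Mb/s up and
 * min(9000, 3000) = 3000 Mb/s down, and the caller's available_up and
 * available_down both drop to 0.
 */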
1307
1308static void tb_usb3_init_credits(struct tb_path_hop *hop)
1309{
1310	struct tb_port *port = hop->in_port;
1311	struct tb_switch *sw = port->sw;
1312	unsigned int credits;
1313
1314	if (tb_port_use_credit_allocation(port)) {
1315		credits = sw->max_usb3_credits;
1316	} else {
1317		if (tb_port_is_null(port))
1318			credits = port->bonded ? 32 : 16;
1319		else
1320			credits = 7;
1321	}
1322
1323	hop->initial_credits = credits;
1324}
1325
1326static void tb_usb3_init_path(struct tb_path *path)
1327{
1328	struct tb_path_hop *hop;
1329
1330	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
1331	path->egress_shared_buffer = TB_PATH_NONE;
1332	path->ingress_fc_enable = TB_PATH_ALL;
1333	path->ingress_shared_buffer = TB_PATH_NONE;
1334	path->priority = 3;
1335	path->weight = 3;
1336	path->drop_packages = 0;
1337
1338	tb_path_for_each_hop(path, hop)
1339		tb_usb3_init_credits(hop);
1340}
1341
1342/**
1343 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
1344 * @tb: Pointer to the domain structure
1345 * @down: USB3 downstream adapter
1346 *
1347 * If @down adapter is active, follows the tunnel to the USB3 upstream
1348 * adapter and back. Returns the discovered tunnel or %NULL if there was
1349 * no tunnel.
1350 */
1351struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
1352{
1353	struct tb_tunnel *tunnel;
1354	struct tb_path *path;
1355
1356	if (!tb_usb3_port_is_enabled(down))
1357		return NULL;
1358
1359	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1360	if (!tunnel)
1361		return NULL;
1362
1363	tunnel->activate = tb_usb3_activate;
1364	tunnel->src_port = down;
1365
1366	/*
1367	 * Discover both paths even if they are not complete. We will
1368	 * clean them up by calling tb_tunnel_deactivate() below in that
1369	 * case.
1370	 */
1371	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
1372				&tunnel->dst_port, "USB3 Down");
1373	if (!path) {
1374		/* Just disable the downstream port */
1375		tb_usb3_port_enable(down, false);
1376		goto err_free;
1377	}
1378	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1379	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
1380
1381	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
1382				"USB3 Up");
1383	if (!path)
1384		goto err_deactivate;
1385	tunnel->paths[TB_USB3_PATH_UP] = path;
1386	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
1387
1388	/* Validate that the tunnel is complete */
1389	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
1390		tb_port_warn(tunnel->dst_port,
1391			     "path does not end on a USB3 adapter, cleaning up\n");
1392		goto err_deactivate;
1393	}
1394
1395	if (down != tunnel->src_port) {
1396		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
1397		goto err_deactivate;
1398	}
1399
1400	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
1401		tb_tunnel_warn(tunnel,
1402			       "tunnel is not fully activated, cleaning up\n");
1403		goto err_deactivate;
1404	}
1405
1406	if (!tb_route(down->sw)) {
1407		int ret;
1408
1409		/*
1410		 * Read the initial bandwidth allocation for the first
1411		 * hop tunnel.
1412		 */
1413		ret = usb4_usb3_port_allocated_bandwidth(down,
1414			&tunnel->allocated_up, &tunnel->allocated_down);
1415		if (ret)
1416			goto err_deactivate;
1417
1418		tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
1419			      tunnel->allocated_up, tunnel->allocated_down);
1420
1421		tunnel->init = tb_usb3_init;
1422		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1423		tunnel->release_unused_bandwidth =
1424			tb_usb3_release_unused_bandwidth;
1425		tunnel->reclaim_available_bandwidth =
1426			tb_usb3_reclaim_available_bandwidth;
1427	}
1428
1429	tb_tunnel_dbg(tunnel, "discovered\n");
1430	return tunnel;
1431
1432err_deactivate:
1433	tb_tunnel_deactivate(tunnel);
1434err_free:
1435	tb_tunnel_free(tunnel);
1436
1437	return NULL;
1438}
1439
1440/**
1441 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
1442 * @tb: Pointer to the domain structure
1443 * @up: USB3 upstream adapter port
1444 * @down: USB3 downstream adapter port
1445 * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
1446 *	    if not limited).
1447 * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
1448 *	      (%0 if not limited).
1449 *
1450 * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
1451 * @TB_TYPE_USB3_DOWN.
1452 *
1453 * Return: Returns a tb_tunnel on success or %NULL on failure.
1454 */
1455struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
1456				       struct tb_port *down, int max_up,
1457				       int max_down)
1458{
1459	struct tb_tunnel *tunnel;
1460	struct tb_path *path;
1461	int max_rate = 0;
1462
1463	/*
1464	 * Check that we have enough bandwidth available for the new
1465	 * USB3 tunnel.
1466	 */
1467	if (max_up > 0 || max_down > 0) {
1468		max_rate = tb_usb3_max_link_rate(down, up);
1469		if (max_rate < 0)
1470			return NULL;
1471
1472		/* Only 90% can be allocated for USB3 isochronous transfers */
1473		max_rate = max_rate * 90 / 100;
1474		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
1475			    max_rate);
1476
1477		if (max_rate > max_up || max_rate > max_down) {
1478			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
1479			return NULL;
1480		}
1481	}
1482
1483	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
1484	if (!tunnel)
1485		return NULL;
1486
1487	tunnel->activate = tb_usb3_activate;
1488	tunnel->src_port = down;
1489	tunnel->dst_port = up;
1490	tunnel->max_up = max_up;
1491	tunnel->max_down = max_down;
1492
1493	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
1494			     "USB3 Down");
1495	if (!path) {
1496		tb_tunnel_free(tunnel);
1497		return NULL;
1498	}
1499	tb_usb3_init_path(path);
1500	tunnel->paths[TB_USB3_PATH_DOWN] = path;
1501
1502	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
1503			     "USB3 Up");
1504	if (!path) {
1505		tb_tunnel_free(tunnel);
1506		return NULL;
1507	}
1508	tb_usb3_init_path(path);
1509	tunnel->paths[TB_USB3_PATH_UP] = path;
1510
1511	if (!tb_route(down->sw)) {
1512		tunnel->allocated_up = max_rate;
1513		tunnel->allocated_down = max_rate;
1514
1515		tunnel->init = tb_usb3_init;
1516		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
1517		tunnel->release_unused_bandwidth =
1518			tb_usb3_release_unused_bandwidth;
1519		tunnel->reclaim_available_bandwidth =
1520			tb_usb3_reclaim_available_bandwidth;
1521	}
1522
1523	return tunnel;
1524}
1525
1526/**
1527 * tb_tunnel_free() - free a tunnel
1528 * @tunnel: Tunnel to be freed
1529 *
1530 * Frees a tunnel. The tunnel does not need to be deactivated.
1531 */
1532void tb_tunnel_free(struct tb_tunnel *tunnel)
1533{
1534	int i;
1535
1536	if (!tunnel)
1537		return;
1538
1539	if (tunnel->deinit)
1540		tunnel->deinit(tunnel);
1541
1542	for (i = 0; i < tunnel->npaths; i++) {
1543		if (tunnel->paths[i])
1544			tb_path_free(tunnel->paths[i]);
1545	}
1546
1547	kfree(tunnel->paths);
1548	kfree(tunnel);
1549}
1550
1551/**
1552 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
1553 * @tunnel: Tunnel to check
1554 */
1555bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
1556{
1557	int i;
1558
1559	for (i = 0; i < tunnel->npaths; i++) {
1560		WARN_ON(!tunnel->paths[i]->activated);
1561		if (tb_path_is_invalid(tunnel->paths[i]))
1562			return true;
1563	}
1564
1565	return false;
1566}
1567
1568/**
1569 * tb_tunnel_restart() - activate a tunnel after a hardware reset
1570 * @tunnel: Tunnel to restart
1571 *
1572 * Return: 0 on success and negative errno in case of failure
1573 */
1574int tb_tunnel_restart(struct tb_tunnel *tunnel)
1575{
1576	int res, i;
1577
1578	tb_tunnel_dbg(tunnel, "activating\n");
1579
1580	/*
1581	 * Make sure all paths are properly disabled before enabling
1582	 * them again.
1583	 */
1584	for (i = 0; i < tunnel->npaths; i++) {
1585		if (tunnel->paths[i]->activated) {
1586			tb_path_deactivate(tunnel->paths[i]);
1587			tunnel->paths[i]->activated = false;
1588		}
1589	}
1590
1591	if (tunnel->init) {
1592		res = tunnel->init(tunnel);
1593		if (res)
1594			return res;
1595	}
1596
1597	for (i = 0; i < tunnel->npaths; i++) {
1598		res = tb_path_activate(tunnel->paths[i]);
1599		if (res)
1600			goto err;
1601	}
1602
1603	if (tunnel->activate) {
1604		res = tunnel->activate(tunnel, true);
1605		if (res)
1606			goto err;
1607	}
1608
1609	return 0;
1610
1611err:
1612	tb_tunnel_warn(tunnel, "activation failed\n");
1613	tb_tunnel_deactivate(tunnel);
1614	return res;
1615}
1616
1617/**
1618 * tb_tunnel_activate() - activate a tunnel
1619 * @tunnel: Tunnel to activate
1620 *
1621 * Return: Returns 0 on success or an error code on failure.
1622 */
1623int tb_tunnel_activate(struct tb_tunnel *tunnel)
1624{
1625	int i;
1626
1627	for (i = 0; i < tunnel->npaths; i++) {
1628		if (tunnel->paths[i]->activated) {
1629			tb_tunnel_WARN(tunnel,
1630				       "trying to activate an already activated tunnel\n");
1631			return -EINVAL;
1632		}
1633	}
1634
1635	return tb_tunnel_restart(tunnel);
1636}
1637
1638/**
1639 * tb_tunnel_deactivate() - deactivate a tunnel
1640 * @tunnel: Tunnel to deactivate
1641 */
1642void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
1643{
1644	int i;
1645
1646	tb_tunnel_dbg(tunnel, "deactivating\n");
1647
1648	if (tunnel->activate)
1649		tunnel->activate(tunnel, false);
1650
1651	for (i = 0; i < tunnel->npaths; i++) {
1652		if (tunnel->paths[i] && tunnel->paths[i]->activated)
1653			tb_path_deactivate(tunnel->paths[i]);
1654	}
1655}
1656
1657/**
1658 * tb_tunnel_port_on_path() - Does the tunnel go through port
1659 * @tunnel: Tunnel to check
1660 * @port: Port to check
1661 *
1662 * Returns true if @tunnel goes through @port (direction does not matter),
1663 * false otherwise.
1664 */
1665bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
1666			    const struct tb_port *port)
1667{
1668	int i;
1669
1670	for (i = 0; i < tunnel->npaths; i++) {
1671		if (!tunnel->paths[i])
1672			continue;
1673
1674		if (tb_path_port_on_path(tunnel->paths[i], port))
1675			return true;
1676	}
1677
1678	return false;
1679}
1680
1681static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
1682{
1683	int i;
1684
1685	for (i = 0; i < tunnel->npaths; i++) {
1686		if (!tunnel->paths[i])
1687			return false;
1688		if (!tunnel->paths[i]->activated)
1689			return false;
1690	}
1691
1692	return true;
1693}
1694
1695/**
1696 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
1697 * @tunnel: Tunnel to check
1698 * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
1699 *		 Can be %NULL.
1700 * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
1701 *		   Can be %NULL.
1702 *
1703 * Stores the amount of isochronous bandwidth @tunnel consumes in
1704 * @consumed_up and @consumed_down. In case of success returns %0,
1705 * negative errno otherwise.
1706 */
1707int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
1708				 int *consumed_down)
1709{
1710	int up_bw = 0, down_bw = 0;
1711
1712	if (!tb_tunnel_is_active(tunnel))
1713		goto out;
1714
1715	if (tunnel->consumed_bandwidth) {
1716		int ret;
1717
1718		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
1719		if (ret)
1720			return ret;
1721
1722		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
1723			      down_bw);
1724	}
1725
1726out:
1727	if (consumed_up)
1728		*consumed_up = up_bw;
1729	if (consumed_down)
1730		*consumed_down = down_bw;
1731
1732	return 0;
1733}
1734
1735/**
1736 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
1737 * @tunnel: Tunnel whose unused bandwidth to release
1738 *
1739 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at the
1740 * moment), this function makes it release all the unused bandwidth.
1741 *
1742 * Returns %0 in case of success and negative errno otherwise.
1743 */
1744int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
1745{
1746	if (!tb_tunnel_is_active(tunnel))
1747		return 0;
1748
1749	if (tunnel->release_unused_bandwidth) {
1750		int ret;
1751
1752		ret = tunnel->release_unused_bandwidth(tunnel);
1753		if (ret)
1754			return ret;
1755	}
1756
1757	return 0;
1758}
1759
1760/**
1761 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
1762 * @tunnel: Tunnel reclaiming available bandwidth
1763 * @available_up: Available upstream bandwidth (in Mb/s)
1764 * @available_down: Available downstream bandwidth (in Mb/s)
1765 *
1766 * Reclaims bandwidth from @available_up and @available_down and updates
1767 * the variables accordingly (e.g. decreases both according to what was
1768 * reclaimed by the tunnel). If nothing was reclaimed, the values are
1769 * kept as is.
1770 */
1771void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
1772					   int *available_up,
1773					   int *available_down)
1774{
1775	if (!tb_tunnel_is_active(tunnel))
1776		return;
1777
1778	if (tunnel->reclaim_available_bandwidth)
1779		tunnel->reclaim_available_bandwidth(tunnel, available_up,
1780						    available_down);
1781}