   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt driver - switch/port utility functions
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 * Copyright (C) 2018, Intel Corporation
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/idr.h>
  11#include <linux/module.h>
  12#include <linux/nvmem-provider.h>
  13#include <linux/pm_runtime.h>
  14#include <linux/sched/signal.h>
  15#include <linux/sizes.h>
  16#include <linux/slab.h>
  17#include <linux/string_helpers.h>
  18
  19#include "tb.h"
  20
  21/* Switch NVM support */
  22
  23struct nvm_auth_status {
  24	struct list_head list;
  25	uuid_t uuid;
  26	u32 status;
  27};
  28
  29static bool clx_enabled = true;
  30module_param_named(clx, clx_enabled, bool, 0444);
  31MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
  32
  33/*
  34 * Hold NVM authentication failure status per switch. This information
  35 * needs to stay around even when the switch gets power cycled so we
  36 * keep it separately.
  37 */
  38static LIST_HEAD(nvm_auth_status_cache);
  39static DEFINE_MUTEX(nvm_auth_status_lock);
  40
  41static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
  42{
  43	struct nvm_auth_status *st;
  44
  45	list_for_each_entry(st, &nvm_auth_status_cache, list) {
  46		if (uuid_equal(&st->uuid, sw->uuid))
  47			return st;
  48	}
  49
  50	return NULL;
  51}
  52
  53static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
  54{
  55	struct nvm_auth_status *st;
  56
  57	mutex_lock(&nvm_auth_status_lock);
  58	st = __nvm_get_auth_status(sw);
  59	mutex_unlock(&nvm_auth_status_lock);
  60
  61	*status = st ? st->status : 0;
  62}
  63
  64static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
  65{
  66	struct nvm_auth_status *st;
  67
  68	if (WARN_ON(!sw->uuid))
  69		return;
  70
  71	mutex_lock(&nvm_auth_status_lock);
  72	st = __nvm_get_auth_status(sw);
  73
  74	if (!st) {
  75		st = kzalloc(sizeof(*st), GFP_KERNEL);
  76		if (!st)
  77			goto unlock;
  78
  79		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
  80		INIT_LIST_HEAD(&st->list);
  81		list_add_tail(&st->list, &nvm_auth_status_cache);
  82	}
  83
  84	st->status = status;
  85unlock:
  86	mutex_unlock(&nvm_auth_status_lock);
  87}
  88
  89static void nvm_clear_auth_status(const struct tb_switch *sw)
  90{
  91	struct nvm_auth_status *st;
  92
  93	mutex_lock(&nvm_auth_status_lock);
  94	st = __nvm_get_auth_status(sw);
  95	if (st) {
  96		list_del(&st->list);
  97		kfree(st);
  98	}
  99	mutex_unlock(&nvm_auth_status_lock);
 100}
 101
 102static int nvm_validate_and_write(struct tb_switch *sw)
 103{
 104	unsigned int image_size;
 105	const u8 *buf;
 106	int ret;
 107
 108	ret = tb_nvm_validate(sw->nvm);
 109	if (ret)
 110		return ret;
 111
 112	ret = tb_nvm_write_headers(sw->nvm);
 113	if (ret)
 114		return ret;
 115
 116	buf = sw->nvm->buf_data_start;
 117	image_size = sw->nvm->buf_data_size;
 118
 119	if (tb_switch_is_usb4(sw))
 120		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
 121	else
 122		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
 123	if (ret)
 124		return ret;
 125
 126	sw->nvm->flushed = true;
 127	return 0;
 128}
 129
 130static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
 131{
 132	int ret = 0;
 133
 134	/*
 135	 * Root switch NVM upgrade requires that we disconnect the
 136	 * existing paths first (in case it is not in safe mode
 137	 * already).
 138	 */
 139	if (!sw->safe_mode) {
 140		u32 status;
 141
 142		ret = tb_domain_disconnect_all_paths(sw->tb);
 143		if (ret)
 144			return ret;
 145		/*
 146		 * The host controller goes away pretty soon after this if
 147		 * everything goes well so getting a timeout is expected.
 148		 */
 149		ret = dma_port_flash_update_auth(sw->dma_port);
 150		if (!ret || ret == -ETIMEDOUT)
 151			return 0;
 152
 153		/*
 154		 * Any error from update auth operation requires power
 155		 * cycling of the host router.
 156		 */
 157		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
 158		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
 159			nvm_set_auth_status(sw, status);
 160	}
 161
 162	/*
 163	 * From safe mode we can get out by just power cycling the
 164	 * switch.
 165	 */
 166	dma_port_power_cycle(sw->dma_port);
 167	return ret;
 168}
 169
 170static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
 171{
 172	int ret, retries = 10;
 173
 174	ret = dma_port_flash_update_auth(sw->dma_port);
 175	switch (ret) {
 176	case 0:
 177	case -ETIMEDOUT:
 178	case -EACCES:
 179	case -EINVAL:
 180		/* Power cycle is required */
 181		break;
 182	default:
 183		return ret;
 184	}
 185
 186	/*
 187	 * Poll here for the authentication status. It takes some time
 188	 * for the device to respond (we get a timeout for a while). Once
 189	 * we get a response the device needs to be power cycled in order
 190	 * for the new NVM to be taken into use.
 191	 */
 192	do {
 193		u32 status;
 194
 195		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
 196		if (ret < 0 && ret != -ETIMEDOUT)
 197			return ret;
 198		if (ret > 0) {
 199			if (status) {
 200				tb_sw_warn(sw, "failed to authenticate NVM\n");
 201				nvm_set_auth_status(sw, status);
 202			}
 203
 204			tb_sw_info(sw, "power cycling the switch now\n");
 205			dma_port_power_cycle(sw->dma_port);
 206			return 0;
 207		}
 208
 209		msleep(500);
 210	} while (--retries);
 211
 212	return -ETIMEDOUT;
 213}
 214
 215static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
 216{
 217	struct pci_dev *root_port;
 218
 219	/*
 220	 * During host router NVM upgrade we should not allow root port to
 221	 * go into D3cold because some root ports cannot trigger PME
 222	 * themselves. To be on the safe side keep the root port in D0 during
 223	 * the whole upgrade process.
 224	 */
 225	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
 226	if (root_port)
 227		pm_runtime_get_noresume(&root_port->dev);
 228}
 229
 230static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
 231{
 232	struct pci_dev *root_port;
 233
 234	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
 235	if (root_port)
 236		pm_runtime_put(&root_port->dev);
 237}
 238
 239static inline bool nvm_readable(struct tb_switch *sw)
 240{
 241	if (tb_switch_is_usb4(sw)) {
 242		/*
 243		 * USB4 devices must support NVM operations while for
 244		 * hosts they are optional. Therefore we query the NVM
 245		 * sector size here and if the query succeeds assume NVM
 246		 * operations are implemented.
 247		 */
 248		return usb4_switch_nvm_sector_size(sw) > 0;
 249	}
 250
 251	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
 252	return !!sw->dma_port;
 253}
 254
 255static inline bool nvm_upgradeable(struct tb_switch *sw)
 256{
 257	if (sw->no_nvm_upgrade)
 258		return false;
 259	return nvm_readable(sw);
 260}
 261
 262static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
 263{
 264	int ret;
 265
 266	if (tb_switch_is_usb4(sw)) {
 267		if (auth_only) {
 268			ret = usb4_switch_nvm_set_offset(sw, 0);
 269			if (ret)
 270				return ret;
 271		}
 272		sw->nvm->authenticating = true;
 273		return usb4_switch_nvm_authenticate(sw);
 274	} else if (auth_only) {
 275		return -EOPNOTSUPP;
 276	}
 277
 278	sw->nvm->authenticating = true;
 279	if (!tb_route(sw)) {
 280		nvm_authenticate_start_dma_port(sw);
 281		ret = nvm_authenticate_host_dma_port(sw);
 282	} else {
 283		ret = nvm_authenticate_device_dma_port(sw);
 284	}
 285
 286	return ret;
 287}
 288
 289/**
 290 * tb_switch_nvm_read() - Read router NVM
 291 * @sw: Router whose NVM to read
 292 * @address: Start address on the NVM
 293 * @buf: Buffer where the read data is copied
 294 * @size: Size of the buffer in bytes
 295 *
 296 * Reads from router NVM and returns the requested data in @buf. Locking
 297 * is up to the caller. Returns %0 in success and negative errno in case
 298 * of failure.
 299 */
 300int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
 301		       size_t size)
 302{
 303	if (tb_switch_is_usb4(sw))
 304		return usb4_switch_nvm_read(sw, address, buf, size);
 305	return dma_port_flash_read(sw->dma_port, address, buf, size);
 306}
 307
 308static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
 309{
 310	struct tb_nvm *nvm = priv;
 311	struct tb_switch *sw = tb_to_switch(nvm->dev);
 312	int ret;
 313
 314	pm_runtime_get_sync(&sw->dev);
 315
 316	if (!mutex_trylock(&sw->tb->lock)) {
 317		ret = restart_syscall();
 318		goto out;
 319	}
 320
 321	ret = tb_switch_nvm_read(sw, offset, val, bytes);
 322	mutex_unlock(&sw->tb->lock);
 323
 324out:
 325	pm_runtime_mark_last_busy(&sw->dev);
 326	pm_runtime_put_autosuspend(&sw->dev);
 327
 328	return ret;
 329}
 330
 331static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
 332{
 333	struct tb_nvm *nvm = priv;
 334	struct tb_switch *sw = tb_to_switch(nvm->dev);
 335	int ret;
 336
 337	if (!mutex_trylock(&sw->tb->lock))
 338		return restart_syscall();
 339
 340	/*
 341	 * Since writing the NVM image might require some special steps,
 342	 * for example when CSS headers are written, we cache the image
 343	 * locally here and handle the special cases when the user asks
 344	 * us to authenticate the image.
 345	 */
 346	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
 347	mutex_unlock(&sw->tb->lock);
 348
 349	return ret;
 350}
 351
 352static int tb_switch_nvm_add(struct tb_switch *sw)
 353{
 354	struct tb_nvm *nvm;
 355	int ret;
 356
 357	if (!nvm_readable(sw))
 358		return 0;
 359
 360	nvm = tb_nvm_alloc(&sw->dev);
 361	if (IS_ERR(nvm)) {
 362		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
 363		goto err_nvm;
 364	}
 365
 366	ret = tb_nvm_read_version(nvm);
 367	if (ret)
 368		goto err_nvm;
 369
 370	/*
 371	 * If the switch is in safe-mode the only accessible portion of
 372	 * the NVM is the non-active one where userspace is expected to
 373	 * write new functional NVM.
 374	 */
 375	if (!sw->safe_mode) {
 376		ret = tb_nvm_add_active(nvm, nvm_read);
 377		if (ret)
 378			goto err_nvm;
 379	}
 380
 381	if (!sw->no_nvm_upgrade) {
 382		ret = tb_nvm_add_non_active(nvm, nvm_write);
 383		if (ret)
 384			goto err_nvm;
 385	}
 386
 387	sw->nvm = nvm;
 388	return 0;
 389
 390err_nvm:
 391	tb_sw_dbg(sw, "NVM upgrade disabled\n");
 392	sw->no_nvm_upgrade = true;
 393	if (!IS_ERR(nvm))
 394		tb_nvm_free(nvm);
 395
 396	return ret;
 397}
 398
 399static void tb_switch_nvm_remove(struct tb_switch *sw)
 400{
 401	struct tb_nvm *nvm;
 402
 403	nvm = sw->nvm;
 404	sw->nvm = NULL;
 405
 406	if (!nvm)
 407		return;
 408
 409	/* Remove authentication status in case the switch is unplugged */
 410	if (!nvm->authenticating)
 411		nvm_clear_auth_status(sw);
 412
 413	tb_nvm_free(nvm);
 414}
 415
 416/* port utility functions */
 417
 418static const char *tb_port_type(const struct tb_regs_port_header *port)
 419{
 420	switch (port->type >> 16) {
 421	case 0:
 422		switch ((u8) port->type) {
 423		case 0:
 424			return "Inactive";
 425		case 1:
 426			return "Port";
 427		case 2:
 428			return "NHI";
 429		default:
 430			return "unknown";
 431		}
 432	case 0x2:
 433		return "Ethernet";
 434	case 0x8:
 435		return "SATA";
 436	case 0xe:
 437		return "DP/HDMI";
 438	case 0x10:
 439		return "PCIe";
 440	case 0x20:
 441		return "USB";
 442	default:
 443		return "unknown";
 444	}
 445}
 446
 447static void tb_dump_port(struct tb *tb, const struct tb_port *port)
 448{
 449	const struct tb_regs_port_header *regs = &port->config;
 450
 451	tb_dbg(tb,
 452	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
 453	       regs->port_number, regs->vendor_id, regs->device_id,
 454	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
 455	       regs->type);
 456	tb_dbg(tb, "  Max hop id (in/out): %d/%d\n",
 457	       regs->max_in_hop_id, regs->max_out_hop_id);
 458	tb_dbg(tb, "  Max counters: %d\n", regs->max_counters);
 459	tb_dbg(tb, "  NFC Credits: %#x\n", regs->nfc_credits);
 460	tb_dbg(tb, "  Credits (total/control): %u/%u\n", port->total_credits,
 461	       port->ctl_credits);
 462}
 463
 464/**
 465 * tb_port_state() - get connectedness state of a port
 466 * @port: the port to check
 467 *
 468 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 469 *
 470 * Return: Returns an enum tb_port_state on success or an error code on failure.
 471 */
 472int tb_port_state(struct tb_port *port)
 473{
 474	struct tb_cap_phy phy;
 475	int res;
 476	if (port->cap_phy == 0) {
 477		tb_port_WARN(port, "does not have a PHY\n");
 478		return -EINVAL;
 479	}
 480	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
 481	if (res)
 482		return res;
 483	return phy.state;
 484}
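
/*
 * Illustrative sketch (not part of the original file): tb_port_state()
 * returns an enum tb_port_state value (or a negative errno), so callers
 * compare the result against the TB_PORT_* constants.
 */
static bool __maybe_unused example_port_unplugged(struct tb_port *port)
{
	return tb_port_state(port) == TB_PORT_UNPLUGGED;
}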
 485
 486/**
 487 * tb_wait_for_port() - wait for a port to become ready
 488 * @port: Port to wait
 489 * @wait_if_unplugged: Wait also when port is unplugged
 490 *
 491 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 492 * wait_if_unplugged is set then we also wait if the port is in state
 493 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 494 * switch resume). Otherwise we only wait if a device is registered but the link
 495 * has not yet been established.
 496 *
 497 * Return: Returns an error code on failure. Returns 0 if the port is not
 498 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 499 * if the port is connected and in state TB_PORT_UP.
 500 */
 501int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
 502{
 503	int retries = 10;
 504	int state;
 505	if (!port->cap_phy) {
 506		tb_port_WARN(port, "does not have PHY\n");
 507		return -EINVAL;
 508	}
 509	if (tb_is_upstream_port(port)) {
 510		tb_port_WARN(port, "is the upstream port\n");
 511		return -EINVAL;
 512	}
 513
 514	while (retries--) {
 515		state = tb_port_state(port);
 516		if (state < 0)
 517			return state;
 518		if (state == TB_PORT_DISABLED) {
 519			tb_port_dbg(port, "is disabled (state: 0)\n");
 520			return 0;
 521		}
 522		if (state == TB_PORT_UNPLUGGED) {
 523			if (wait_if_unplugged) {
 524				/* used during resume */
 525				tb_port_dbg(port,
 526					    "is unplugged (state: 7), retrying...\n");
 527				msleep(100);
 528				continue;
 529			}
 530			tb_port_dbg(port, "is unplugged (state: 7)\n");
 531			return 0;
 532		}
 533		if (state == TB_PORT_UP) {
 534			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
 535			return 1;
 536		}
 537
 538		/*
 539		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
 540		 * time.
 541		 */
 542		tb_port_dbg(port,
 543			    "is connected, link is not up (state: %d), retrying...\n",
 544			    state);
 545		msleep(100);
 546	}
 547	tb_port_warn(port,
 548		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
 549	return 0;
 550}
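
/*
 * Illustrative sketch (not part of the original file): a typical use of
 * tb_wait_for_port() during switch scan. Only a return value of 1 means
 * a device is connected and the link is up.
 */
static bool __maybe_unused example_port_ready(struct tb_port *port)
{
	/* Do not wait for unplugged ports outside of resume */
	return tb_wait_for_port(port, false) == 1;
}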
 551
 552/**
 553 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 554 * @port: Port to add/remove NFC credits
 555 * @credits: Credits to add/remove
 556 *
 557 * Change the number of NFC credits allocated to @port by @credits. To remove
 558 * NFC credits pass a negative amount of credits.
 559 *
 560 * Return: Returns 0 on success or an error code on failure.
 561 */
 562int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 563{
 564	u32 nfc_credits;
 565
 566	if (credits == 0 || port->sw->is_unplugged)
 567		return 0;
 568
 569	/*
 570	 * USB4 restricts programming NFC buffers to lane adapters only
 571	 * so skip other ports.
 572	 */
 573	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
 574		return 0;
 575
 576	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
 577	if (credits < 0)
 578		credits = max_t(int, -nfc_credits, credits);
 579
 580	nfc_credits += credits;
 581
 582	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
 583		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
 584
 585	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
 586	port->config.nfc_credits |= nfc_credits;
 587
 588	return tb_port_write(port, &port->config.nfc_credits,
 589			     TB_CFG_PORT, ADP_CS_4, 1);
 590}
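
/*
 * Illustrative sketch (not part of the original file): per the
 * kernel-doc above, NFC buffers are reserved with a positive amount of
 * credits and released again with a negative one.
 */
static int __maybe_unused example_nfc_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_add_nfc_credits(port, 1);		/* reserve one buffer */
	if (ret)
		return ret;
	return tb_port_add_nfc_credits(port, -1);	/* release it again */
}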
 591
 592/**
 593 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 594 * @port: Port whose counters to clear
 595 * @counter: Counter index to clear
 596 *
 597 * Return: Returns 0 on success or an error code on failure.
 598 */
 599int tb_port_clear_counter(struct tb_port *port, int counter)
 600{
 601	u32 zero[3] = { 0, 0, 0 };
 602	tb_port_dbg(port, "clearing counter %d\n", counter);
 603	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
 604}
 605
 606/**
 607 * tb_port_unlock() - Unlock downstream port
 608 * @port: Port to unlock
 609 *
 610 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
  611 * downstream router accessible to the CM.
 612 */
 613int tb_port_unlock(struct tb_port *port)
 614{
 615	if (tb_switch_is_icm(port->sw))
 616		return 0;
 617	if (!tb_port_is_null(port))
 618		return -EINVAL;
 619	if (tb_switch_is_usb4(port->sw))
 620		return usb4_port_unlock(port);
 621	return 0;
 622}
 623
 624static int __tb_port_enable(struct tb_port *port, bool enable)
 625{
 626	int ret;
 627	u32 phy;
 628
 629	if (!tb_port_is_null(port))
 630		return -EINVAL;
 631
 632	ret = tb_port_read(port, &phy, TB_CFG_PORT,
 633			   port->cap_phy + LANE_ADP_CS_1, 1);
 634	if (ret)
 635		return ret;
 636
 637	if (enable)
 638		phy &= ~LANE_ADP_CS_1_LD;
 639	else
 640		phy |= LANE_ADP_CS_1_LD;
 641
 642
 643	ret = tb_port_write(port, &phy, TB_CFG_PORT,
 644			    port->cap_phy + LANE_ADP_CS_1, 1);
 645	if (ret)
 646		return ret;
 647
 648	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
 649	return 0;
 650}
 651
 652/**
 653 * tb_port_enable() - Enable lane adapter
 654 * @port: Port to enable (can be %NULL)
 655 *
  656 * This is used on lane 0 and lane 1 adapters to enable the lane.
 657 */
 658int tb_port_enable(struct tb_port *port)
 659{
 660	return __tb_port_enable(port, true);
 661}
 662
 663/**
 664 * tb_port_disable() - Disable lane adapter
 665 * @port: Port to disable (can be %NULL)
 666 *
  667 * This is used on lane 0 and lane 1 adapters to disable the lane.
 668 */
 669int tb_port_disable(struct tb_port *port)
 670{
 671	return __tb_port_enable(port, false);
 672}
 673
 674/*
 675 * tb_init_port() - initialize a port
 676 *
 677 * This is a helper method for tb_switch_alloc. Does not check or initialize
 678 * any downstream switches.
 679 *
 680 * Return: Returns 0 on success or an error code on failure.
 681 */
 682static int tb_init_port(struct tb_port *port)
 683{
 684	int res;
 685	int cap;
 686
 687	INIT_LIST_HEAD(&port->list);
 688
 689	/* Control adapter does not have configuration space */
 690	if (!port->port)
 691		return 0;
 692
 693	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
 694	if (res) {
 695		if (res == -ENODEV) {
 696			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
 697			       port->port);
 698			port->disabled = true;
 699			return 0;
 700		}
 701		return res;
 702	}
 703
 704	/* Port 0 is the switch itself and has no PHY. */
 705	if (port->config.type == TB_TYPE_PORT) {
 706		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
 707
 708		if (cap > 0)
 709			port->cap_phy = cap;
 710		else
 711			tb_port_WARN(port, "non switch port without a PHY\n");
 712
 713		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
 714		if (cap > 0)
 715			port->cap_usb4 = cap;
 716
 717		/*
  718		 * On USB4 ports the buffers allocated for the control
  719		 * path can be read from the path config space. For
  720		 * legacy devices we use a hard-coded value.
 721		 */
 722		if (tb_switch_is_usb4(port->sw)) {
 723			struct tb_regs_hop hop;
 724
 725			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
 726				port->ctl_credits = hop.initial_credits;
 727		}
 728		if (!port->ctl_credits)
 729			port->ctl_credits = 2;
 730
 731	} else {
 732		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
 733		if (cap > 0)
 734			port->cap_adap = cap;
 735	}
 736
 737	port->total_credits =
 738		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
 739		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 740
 741	tb_dump_port(port->sw->tb, port);
 742	return 0;
 743}
 744
 745static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 746			       int max_hopid)
 747{
 748	int port_max_hopid;
 749	struct ida *ida;
 750
 751	if (in) {
 752		port_max_hopid = port->config.max_in_hop_id;
 753		ida = &port->in_hopids;
 754	} else {
 755		port_max_hopid = port->config.max_out_hop_id;
 756		ida = &port->out_hopids;
 757	}
 758
 759	/*
  760	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7
  761	 * are reserved.
 762	 */
 763	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
 764		min_hopid = TB_PATH_MIN_HOPID;
 765
 766	if (max_hopid < 0 || max_hopid > port_max_hopid)
 767		max_hopid = port_max_hopid;
 768
 769	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
 770}
 771
 772/**
 773 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 774 * @port: Port to allocate HopID for
 775 * @min_hopid: Minimum acceptable input HopID
 776 * @max_hopid: Maximum acceptable input HopID
 777 *
 778 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 779 * case of error.
 780 */
 781int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 782{
 783	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
 784}
 785
 786/**
 787 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 788 * @port: Port to allocate HopID for
 789 * @min_hopid: Minimum acceptable output HopID
 790 * @max_hopid: Maximum acceptable output HopID
 791 *
 792 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 793 * case of error.
 794 */
 795int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
 796{
 797	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
 798}
 799
 800/**
 801 * tb_port_release_in_hopid() - Release allocated input HopID from port
 802 * @port: Port whose HopID to release
 803 * @hopid: HopID to release
 804 */
 805void tb_port_release_in_hopid(struct tb_port *port, int hopid)
 806{
 807	ida_simple_remove(&port->in_hopids, hopid);
 808}
 809
 810/**
 811 * tb_port_release_out_hopid() - Release allocated output HopID from port
 812 * @port: Port whose HopID to release
 813 * @hopid: HopID to release
 814 */
 815void tb_port_release_out_hopid(struct tb_port *port, int hopid)
 816{
 817	ida_simple_remove(&port->out_hopids, hopid);
 818}
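
/*
 * Illustrative sketch (not part of the original file): HopIDs are
 * allocated before a path is programmed and released when the path is
 * torn down. A negative @max_hopid means "up to the port maximum".
 */
static int __maybe_unused example_hopid(struct tb_port *port)
{
	int hopid;

	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
	if (hopid < 0)
		return hopid;
	/* ... program a path that uses @hopid here ... */
	tb_port_release_in_hopid(port, hopid);
	return 0;
}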
 819
 820static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
 821					  const struct tb_switch *sw)
 822{
 823	u64 mask = (1ULL << parent->config.depth * 8) - 1;
 824	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
 825}
 826
 827/**
 828 * tb_next_port_on_path() - Return next port for given port on a path
 829 * @start: Start port of the walk
 830 * @end: End port of the walk
 831 * @prev: Previous port (%NULL if this is the first)
 832 *
 833 * This function can be used to walk from one port to another if they
  834 * are connected through zero or more switches. If @prev is a dual
  835 * link port, the function follows that link and returns the other
  836 * end of that same link.
 837 *
 838 * If the @end port has been reached, return %NULL.
 839 *
 840 * Domain tb->lock must be held when this function is called.
 841 */
 842struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 843				     struct tb_port *prev)
 844{
 845	struct tb_port *next;
 846
 847	if (!prev)
 848		return start;
 849
 850	if (prev->sw == end->sw) {
 851		if (prev == end)
 852			return NULL;
 853		return end;
 854	}
 855
 856	if (tb_switch_is_reachable(prev->sw, end->sw)) {
 857		next = tb_port_at(tb_route(end->sw), prev->sw);
 858		/* Walk down the topology if next == prev */
 859		if (prev->remote &&
 860		    (next == prev || next->dual_link_port == prev))
 861			next = prev->remote;
 862	} else {
 863		if (tb_is_upstream_port(prev)) {
 864			next = prev->remote;
 865		} else {
 866			next = tb_upstream_port(prev->sw);
 867			/*
 868			 * Keep the same link if prev and next are both
 869			 * dual link ports.
 870			 */
 871			if (next->dual_link_port &&
 872			    next->link_nr != prev->link_nr) {
 873				next = next->dual_link_port;
 874			}
 875		}
 876	}
 877
 878	return next != prev ? next : NULL;
 879}
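
/*
 * Illustrative sketch (not part of the original file): walking every
 * port between two endpoints with tb_next_port_on_path(). The
 * tb_for_each_port_on_path() helper in tb.h wraps this same loop.
 */
static void __maybe_unused example_walk_path(struct tb_port *src,
					     struct tb_port *dst)
{
	struct tb_port *p = NULL;

	/* Caller must hold the domain tb->lock */
	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
		tb_port_dbg(p, "is on the path\n");
}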
 880
 881/**
 882 * tb_port_get_link_speed() - Get current link speed
 883 * @port: Port to check (USB4 or CIO)
 884 *
 885 * Returns link speed in Gb/s or negative errno in case of failure.
 886 */
 887int tb_port_get_link_speed(struct tb_port *port)
 888{
 889	u32 val, speed;
 890	int ret;
 891
 892	if (!port->cap_phy)
 893		return -EINVAL;
 894
 895	ret = tb_port_read(port, &val, TB_CFG_PORT,
 896			   port->cap_phy + LANE_ADP_CS_1, 1);
 897	if (ret)
 898		return ret;
 899
 900	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
 901		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
 902	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
 903}
 904
 905/**
 906 * tb_port_get_link_width() - Get current link width
 907 * @port: Port to check (USB4 or CIO)
 908 *
 909 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 910 * or negative errno in case of failure.
 911 */
 912int tb_port_get_link_width(struct tb_port *port)
 913{
 914	u32 val;
 915	int ret;
 916
 917	if (!port->cap_phy)
 918		return -EINVAL;
 919
 920	ret = tb_port_read(port, &val, TB_CFG_PORT,
 921			   port->cap_phy + LANE_ADP_CS_1, 1);
 922	if (ret)
 923		return ret;
 924
 925	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
 926		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
 927}
 928
 929static bool tb_port_is_width_supported(struct tb_port *port, int width)
 930{
 931	u32 phy, widths;
 932	int ret;
 933
 934	if (!port->cap_phy)
 935		return false;
 936
 937	ret = tb_port_read(port, &phy, TB_CFG_PORT,
 938			   port->cap_phy + LANE_ADP_CS_0, 1);
 939	if (ret)
 940		return false;
 941
 942	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
 943		LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
 944
 945	return !!(widths & width);
 946}
 947
 948/**
 949 * tb_port_set_link_width() - Set target link width of the lane adapter
 950 * @port: Lane adapter
 951 * @width: Target link width (%1 or %2)
 952 *
 953 * Sets the target link width of the lane adapter to @width. Does not
 954 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 955 *
 956 * Return: %0 in case of success and negative errno in case of error
 957 */
 958int tb_port_set_link_width(struct tb_port *port, unsigned int width)
 959{
 960	u32 val;
 961	int ret;
 962
 963	if (!port->cap_phy)
 964		return -EINVAL;
 965
 966	ret = tb_port_read(port, &val, TB_CFG_PORT,
 967			   port->cap_phy + LANE_ADP_CS_1, 1);
 968	if (ret)
 969		return ret;
 970
 971	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
 972	switch (width) {
 973	case 1:
 974		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
 975			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
 976		break;
 977	case 2:
 978		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
 979			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
 980		break;
 981	default:
 982		return -EINVAL;
 983	}
 984
 985	return tb_port_write(port, &val, TB_CFG_PORT,
 986			     port->cap_phy + LANE_ADP_CS_1, 1);
 987}
 988
 989/**
 990 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 991 * @port: Lane adapter
 992 * @bonding: enable/disable bonding
 993 *
 994 * Enables or disables lane bonding. This should be called after target
 995 * link width has been set (tb_port_set_link_width()). Note in most
 996 * cases one should use tb_port_lane_bonding_enable() instead to enable
 997 * lane bonding.
 998 *
  999 * As a side effect sets @port->bonded accordingly (and does the same
1000 * for lane 1 too).
1001 *
1002 * Return: %0 in case of success and negative errno in case of error
1003 */
1004int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
1005{
1006	u32 val;
1007	int ret;
1008
1009	if (!port->cap_phy)
1010		return -EINVAL;
1011
1012	ret = tb_port_read(port, &val, TB_CFG_PORT,
1013			   port->cap_phy + LANE_ADP_CS_1, 1);
1014	if (ret)
1015		return ret;
1016
1017	if (bonding)
1018		val |= LANE_ADP_CS_1_LB;
1019	else
1020		val &= ~LANE_ADP_CS_1_LB;
1021
1022	ret = tb_port_write(port, &val, TB_CFG_PORT,
1023			    port->cap_phy + LANE_ADP_CS_1, 1);
1024	if (ret)
1025		return ret;
1026
1027	/*
1028	 * When lane 0 bonding is set it will affect lane 1 too so
1029	 * update both.
1030	 */
1031	port->bonded = bonding;
1032	port->dual_link_port->bonded = bonding;
1033
1034	return 0;
1035}
1036
1037/**
1038 * tb_port_lane_bonding_enable() - Enable bonding on port
1039 * @port: port to enable
1040 *
1041 * Enable bonding by setting the link width of the port and the other
1042 * port in case of dual link port. Does not wait for the link to
1043 * actually reach the bonded state so caller needs to call
1044 * tb_port_wait_for_link_width() before enabling any paths through the
1045 * link to make sure the link is in expected state.
1046 *
1047 * Return: %0 in case of success and negative errno in case of error
1048 */
1049int tb_port_lane_bonding_enable(struct tb_port *port)
1050{
1051	int ret;
1052
1053	/*
 1054	 * Enable lane bonding for both links if not already enabled,
 1055	 * for example by the boot firmware.
1056	 */
1057	ret = tb_port_get_link_width(port);
1058	if (ret == 1) {
1059		ret = tb_port_set_link_width(port, 2);
1060		if (ret)
1061			goto err_lane0;
1062	}
1063
1064	ret = tb_port_get_link_width(port->dual_link_port);
1065	if (ret == 1) {
1066		ret = tb_port_set_link_width(port->dual_link_port, 2);
1067		if (ret)
1068			goto err_lane0;
1069	}
1070
1071	ret = tb_port_set_lane_bonding(port, true);
1072	if (ret)
1073		goto err_lane1;
1074
1075	return 0;
1076
1077err_lane1:
1078	tb_port_set_link_width(port->dual_link_port, 1);
1079err_lane0:
1080	tb_port_set_link_width(port, 1);
1081	return ret;
1082}
1083
1084/**
1085 * tb_port_lane_bonding_disable() - Disable bonding on port
1086 * @port: port to disable
1087 *
1088 * Disable bonding by setting the link width of the port and the
1089 * other port in case of dual link port.
1090 */
1091void tb_port_lane_bonding_disable(struct tb_port *port)
1092{
1093	tb_port_set_lane_bonding(port, false);
1094	tb_port_set_link_width(port->dual_link_port, 1);
1095	tb_port_set_link_width(port, 1);
1096}
1097
1098/**
1099 * tb_port_wait_for_link_width() - Wait until link reaches specific width
1100 * @port: Port to wait for
1101 * @width: Expected link width (%1 or %2)
1102 * @timeout_msec: Timeout in ms how long to wait
1103 *
1104 * Should be used after both ends of the link have been bonded (or
1105 * bonding has been disabled) to wait until the link actually reaches
1106 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
1107 * within the given timeout, %0 if it did.
1108 */
1109int tb_port_wait_for_link_width(struct tb_port *port, int width,
1110				int timeout_msec)
1111{
1112	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1113	int ret;
1114
1115	do {
1116		ret = tb_port_get_link_width(port);
1117		if (ret < 0) {
1118			/*
 1119			 * Sometimes we get a port locked error when
 1120			 * polling the lanes, so we ignore it and
 1121			 * retry.
1122			 */
1123			if (ret != -EACCES)
1124				return ret;
1125		} else if (ret == width) {
1126			return 0;
1127		}
1128
1129		usleep_range(1000, 2000);
1130	} while (ktime_before(ktime_get(), timeout));
1131
1132	return -ETIMEDOUT;
1133}
1134
1135static int tb_port_do_update_credits(struct tb_port *port)
1136{
1137	u32 nfc_credits;
1138	int ret;
1139
1140	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
1141	if (ret)
1142		return ret;
1143
1144	if (nfc_credits != port->config.nfc_credits) {
1145		u32 total;
1146
1147		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
1148			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
1149
1150		tb_port_dbg(port, "total credits changed %u -> %u\n",
1151			    port->total_credits, total);
1152
1153		port->config.nfc_credits = nfc_credits;
1154		port->total_credits = total;
1155	}
1156
1157	return 0;
1158}
1159
1160/**
1161 * tb_port_update_credits() - Re-read port total credits
1162 * @port: Port to update
1163 *
1164 * After the link is bonded (or bonding was disabled) the port total
1165 * credits may change, so this function needs to be called to re-read
 1166 * the credits. This also updates the second lane adapter.
1167 */
1168int tb_port_update_credits(struct tb_port *port)
1169{
1170	int ret;
1171
1172	ret = tb_port_do_update_credits(port);
1173	if (ret)
1174		return ret;
1175	return tb_port_do_update_credits(port->dual_link_port);
1176}
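
/*
 * Illustrative sketch (not part of the original file): the full lane
 * bonding sequence implied by the kernel-doc above. Bonding is enabled,
 * then we wait for the link to actually reach the dual-lane state, and
 * finally re-read the credits which may have changed. The 100 ms
 * timeout is an arbitrary example value.
 */
static int __maybe_unused example_bond_link(struct tb_port *up)
{
	int ret;

	ret = tb_port_lane_bonding_enable(up);
	if (ret)
		return ret;

	ret = tb_port_wait_for_link_width(up, 2, 100);
	if (ret) {
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	return tb_port_update_credits(up);
}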
1177
1178static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
1179{
1180	u32 phy;
1181	int ret;
1182
1183	ret = tb_port_read(port, &phy, TB_CFG_PORT,
1184			   port->cap_phy + LANE_ADP_CS_1, 1);
1185	if (ret)
1186		return ret;
1187
1188	if (secondary)
1189		phy |= LANE_ADP_CS_1_PMS;
1190	else
1191		phy &= ~LANE_ADP_CS_1_PMS;
1192
1193	return tb_port_write(port, &phy, TB_CFG_PORT,
1194			     port->cap_phy + LANE_ADP_CS_1, 1);
1195}
1196
1197static int tb_port_pm_secondary_enable(struct tb_port *port)
1198{
1199	return __tb_port_pm_secondary_set(port, true);
1200}
1201
1202static int tb_port_pm_secondary_disable(struct tb_port *port)
1203{
1204	return __tb_port_pm_secondary_set(port, false);
1205}
1206
1207/* Called for USB4 or Titan Ridge routers only */
1208static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
1209{
1210	u32 val, mask = 0;
1211	bool ret;
1212
1213	/* Don't enable CLx in case of two single-lane links */
1214	if (!port->bonded && port->dual_link_port)
1215		return false;
1216
1217	/* Don't enable CLx in case of inter-domain link */
1218	if (port->xdomain)
1219		return false;
1220
1221	if (tb_switch_is_usb4(port->sw)) {
1222		if (!usb4_port_clx_supported(port))
1223			return false;
1224	} else if (!tb_lc_is_clx_supported(port)) {
1225		return false;
1226	}
1227
1228	if (clx_mask & TB_CL1) {
1229		/* CL0s and CL1 are enabled and supported together */
1230		mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
1231	}
1232	if (clx_mask & TB_CL2)
1233		mask |= LANE_ADP_CS_0_CL2_SUPPORT;
1234
1235	ret = tb_port_read(port, &val, TB_CFG_PORT,
1236			   port->cap_phy + LANE_ADP_CS_0, 1);
1237	if (ret)
1238		return false;
1239
1240	return !!(val & mask);
1241}
1242
1243static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
1244{
1245	u32 phy, mask;
1246	int ret;
1247
1248	/* CL0s and CL1 are enabled and supported together */
1249	if (clx == TB_CL1)
1250		mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
1251	else
 1252		/* For now we support only CL0s and CL1, not CL2 */
1253		return -EOPNOTSUPP;
1254
1255	ret = tb_port_read(port, &phy, TB_CFG_PORT,
1256			   port->cap_phy + LANE_ADP_CS_1, 1);
1257	if (ret)
1258		return ret;
1259
1260	if (enable)
1261		phy |= mask;
1262	else
1263		phy &= ~mask;
1264
1265	return tb_port_write(port, &phy, TB_CFG_PORT,
1266			     port->cap_phy + LANE_ADP_CS_1, 1);
1267}
1268
1269static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
1270{
1271	return __tb_port_clx_set(port, clx, false);
1272}
1273
1274static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
1275{
1276	return __tb_port_clx_set(port, clx, true);
1277}
1278
1279/**
1280 * tb_port_is_clx_enabled() - Is given CL state enabled
1281 * @port: USB4 port to check
1282 * @clx_mask: Mask of CL states to check
1283 *
1284 * Returns true if any of the given CL states is enabled for @port.
1285 */
1286bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
1287{
1288	u32 val, mask = 0;
1289	int ret;
1290
1291	if (!tb_port_clx_supported(port, clx_mask))
1292		return false;
1293
1294	if (clx_mask & TB_CL1)
1295		mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
1296	if (clx_mask & TB_CL2)
1297		mask |= LANE_ADP_CS_1_CL2_ENABLE;
1298
1299	ret = tb_port_read(port, &val, TB_CFG_PORT,
1300			   port->cap_phy + LANE_ADP_CS_1, 1);
1301	if (ret)
1302		return false;
1303
1304	return !!(val & mask);
1305}
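
/*
 * Illustrative sketch (not part of the original file): checking whether
 * any of the supported low power CL states is active on a lane adapter.
 */
static bool __maybe_unused example_clx_active(struct tb_port *port)
{
	return tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2);
}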
1306
1307static int tb_port_start_lane_initialization(struct tb_port *port)
1308{
1309	int ret;
1310
1311	if (tb_switch_is_usb4(port->sw))
1312		return 0;
1313
1314	ret = tb_lc_start_lane_initialization(port);
1315	return ret == -EINVAL ? 0 : ret;
1316}
1317
1318/*
1319 * Returns true if the port had something (router, XDomain) connected
1320 * before suspend.
1321 */
1322static bool tb_port_resume(struct tb_port *port)
1323{
1324	bool has_remote = tb_port_has_remote(port);
1325
1326	if (port->usb4) {
1327		usb4_port_device_resume(port->usb4);
1328	} else if (!has_remote) {
1329		/*
1330		 * For disconnected downstream lane adapters start lane
1331		 * initialization now so we detect future connects.
1332		 *
 1333		 * For XDomain start the lane initialization now so the
1334		 * link gets re-established.
1335		 *
1336		 * This is only needed for non-USB4 ports.
1337		 */
1338		if (!tb_is_upstream_port(port) || port->xdomain)
1339			tb_port_start_lane_initialization(port);
1340	}
1341
1342	return has_remote || port->xdomain;
1343}
1344
1345/**
1346 * tb_port_is_enabled() - Is the adapter port enabled
1347 * @port: Port to check
1348 */
1349bool tb_port_is_enabled(struct tb_port *port)
1350{
1351	switch (port->config.type) {
1352	case TB_TYPE_PCIE_UP:
1353	case TB_TYPE_PCIE_DOWN:
1354		return tb_pci_port_is_enabled(port);
1355
1356	case TB_TYPE_DP_HDMI_IN:
1357	case TB_TYPE_DP_HDMI_OUT:
1358		return tb_dp_port_is_enabled(port);
1359
1360	case TB_TYPE_USB3_UP:
1361	case TB_TYPE_USB3_DOWN:
1362		return tb_usb3_port_is_enabled(port);
1363
1364	default:
1365		return false;
1366	}
1367}
1368
1369/**
1370 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1371 * @port: USB3 adapter port to check
1372 */
1373bool tb_usb3_port_is_enabled(struct tb_port *port)
1374{
1375	u32 data;
1376
1377	if (tb_port_read(port, &data, TB_CFG_PORT,
1378			 port->cap_adap + ADP_USB3_CS_0, 1))
1379		return false;
1380
1381	return !!(data & ADP_USB3_CS_0_PE);
1382}
1383
1384/**
1385 * tb_usb3_port_enable() - Enable USB3 adapter port
1386 * @port: USB3 adapter port to enable
1387 * @enable: Enable/disable the USB3 adapter
1388 */
1389int tb_usb3_port_enable(struct tb_port *port, bool enable)
1390{
1391	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1392			  : ADP_USB3_CS_0_V;
1393
1394	if (!port->cap_adap)
1395		return -ENXIO;
1396	return tb_port_write(port, &word, TB_CFG_PORT,
1397			     port->cap_adap + ADP_USB3_CS_0, 1);
1398}
1399
1400/**
1401 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1402 * @port: PCIe port to check
1403 */
1404bool tb_pci_port_is_enabled(struct tb_port *port)
1405{
1406	u32 data;
1407
1408	if (tb_port_read(port, &data, TB_CFG_PORT,
1409			 port->cap_adap + ADP_PCIE_CS_0, 1))
1410		return false;
1411
1412	return !!(data & ADP_PCIE_CS_0_PE);
1413}
1414
1415/**
1416 * tb_pci_port_enable() - Enable PCIe adapter port
1417 * @port: PCIe port to enable
1418 * @enable: Enable/disable the PCIe adapter
1419 */
1420int tb_pci_port_enable(struct tb_port *port, bool enable)
1421{
1422	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
1423	if (!port->cap_adap)
1424		return -ENXIO;
1425	return tb_port_write(port, &word, TB_CFG_PORT,
1426			     port->cap_adap + ADP_PCIE_CS_0, 1);
1427}
1428
1429/**
1430 * tb_dp_port_hpd_is_active() - Is HPD already active
1431 * @port: DP out port to check
1432 *
 1433 * Checks if the DP OUT adapter port has the hotplug (HDP) bit already set.
1434 */
1435int tb_dp_port_hpd_is_active(struct tb_port *port)
1436{
1437	u32 data;
1438	int ret;
1439
1440	ret = tb_port_read(port, &data, TB_CFG_PORT,
1441			   port->cap_adap + ADP_DP_CS_2, 1);
1442	if (ret)
1443		return ret;
1444
1445	return !!(data & ADP_DP_CS_2_HDP);
1446}
1447
1448/**
1449 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1450 * @port: Port to clear HPD
1451 *
1452 * If the DP IN port has HDP set, this function can be used to clear it.
1453 */
1454int tb_dp_port_hpd_clear(struct tb_port *port)
1455{
1456	u32 data;
1457	int ret;
1458
1459	ret = tb_port_read(port, &data, TB_CFG_PORT,
1460			   port->cap_adap + ADP_DP_CS_3, 1);
1461	if (ret)
1462		return ret;
1463
1464	data |= ADP_DP_CS_3_HDPC;
1465	return tb_port_write(port, &data, TB_CFG_PORT,
1466			     port->cap_adap + ADP_DP_CS_3, 1);
1467}
1468
1469/**
1470 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1471 * @port: DP IN/OUT port to set hops
1472 * @video: Video Hop ID
1473 * @aux_tx: AUX TX Hop ID
1474 * @aux_rx: AUX RX Hop ID
1475 *
1476 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
1477 * router DP adapters too but does not program the values as the fields
1478 * are read-only.
1479 */
1480int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1481			unsigned int aux_tx, unsigned int aux_rx)
1482{
1483	u32 data[2];
1484	int ret;
1485
1486	if (tb_switch_is_usb4(port->sw))
1487		return 0;
1488
1489	ret = tb_port_read(port, data, TB_CFG_PORT,
1490			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1491	if (ret)
1492		return ret;
1493
1494	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
 1495	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
 1496	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1497
1498	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1499		ADP_DP_CS_0_VIDEO_HOPID_MASK;
1500	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1501	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1502		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1503
1504	return tb_port_write(port, data, TB_CFG_PORT,
1505			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1506}
1507
1508/**
1509 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1510 * @port: DP adapter port to check
1511 */
1512bool tb_dp_port_is_enabled(struct tb_port *port)
1513{
1514	u32 data[2];
1515
1516	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1517			 ARRAY_SIZE(data)))
1518		return false;
1519
1520	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1521}
1522
1523/**
1524 * tb_dp_port_enable() - Enables/disables DP paths of a port
1525 * @port: DP IN/OUT port
1526 * @enable: Enable/disable DP path
1527 *
1528 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1529 * calling this function.
1530 */
1531int tb_dp_port_enable(struct tb_port *port, bool enable)
1532{
1533	u32 data[2];
1534	int ret;
1535
1536	ret = tb_port_read(port, data, TB_CFG_PORT,
1537			  port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1538	if (ret)
1539		return ret;
1540
1541	if (enable)
1542		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1543	else
1544		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1545
1546	return tb_port_write(port, data, TB_CFG_PORT,
1547			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1548}
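
/*
 * Illustrative sketch (not part of the original file): the DP adapter
 * programming order implied by the kernel-doc above. The HopID values
 * are arbitrary example numbers.
 */
static int __maybe_unused example_dp_adapter(struct tb_port *port)
{
	int ret;

	/* First program the video and AUX HopIDs... */
	ret = tb_dp_port_set_hops(port, 9, 8, 8);
	if (ret)
		return ret;
	/* ...then enable the video and AUX paths */
	return tb_dp_port_enable(port, true);
}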
1549
1550/* switch utility functions */
1551
1552static const char *tb_switch_generation_name(const struct tb_switch *sw)
1553{
1554	switch (sw->generation) {
1555	case 1:
1556		return "Thunderbolt 1";
1557	case 2:
1558		return "Thunderbolt 2";
1559	case 3:
1560		return "Thunderbolt 3";
1561	case 4:
1562		return "USB4";
1563	default:
1564		return "Unknown";
1565	}
1566}
1567
1568static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1569{
1570	const struct tb_regs_switch_header *regs = &sw->config;
1571
1572	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1573	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1574	       regs->revision, regs->thunderbolt_version);
1575	tb_dbg(tb, "  Max Port Number: %d\n", regs->max_port_number);
1576	tb_dbg(tb, "  Config:\n");
1577	tb_dbg(tb,
1578		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1579	       regs->upstream_port_number, regs->depth,
1580	       (((u64) regs->route_hi) << 32) | regs->route_lo,
1581	       regs->enabled, regs->plug_events_delay);
1582	tb_dbg(tb, "   unknown1: %#x unknown4: %#x\n",
1583	       regs->__unknown1, regs->__unknown4);
1584}
1585
1586/**
1587 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
1588 * @sw: Switch to reset
1589 *
1590 * Return: Returns 0 on success or an error code on failure.
1591 */
1592int tb_switch_reset(struct tb_switch *sw)
1593{
1594	struct tb_cfg_result res;
1595
1596	if (sw->generation > 1)
1597		return 0;
1598
1599	tb_sw_dbg(sw, "resetting switch\n");
1600
1601	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
1602			      TB_CFG_SWITCH, 2, 2);
1603	if (res.err)
1604		return res.err;
1605	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
1606	if (res.err > 0)
1607		return -EIO;
1608	return res.err;
1609}
1610
1611/**
1612 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
1613 * @sw: Router to read the offset value from
1614 * @offset: Offset in the router config space to read from
1615 * @bit: Bit mask in the offset to wait for
1616 * @value: Value of the bits to wait for
1617 * @timeout_msec: Timeout in ms how long to wait
1618 *
 1619 * Wait until the specified bits in the specified offset reach the specified value.
1620 * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
1621 * within the given timeout or a negative errno in case of failure.
1622 */
1623int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
1624			   u32 value, int timeout_msec)
1625{
1626	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1627
1628	do {
1629		u32 val;
1630		int ret;
1631
1632		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
1633		if (ret)
1634			return ret;
1635
1636		if ((val & bit) == value)
1637			return 0;
1638
1639		usleep_range(50, 100);
1640	} while (ktime_before(ktime_get(), timeout));
1641
1642	return -ETIMEDOUT;
1643}
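
/*
 * Illustrative sketch (not part of the original file): waiting for a
 * router config space bit to be set. The offset and bit below are
 * example values only.
 */
static int __maybe_unused example_wait_bit(struct tb_switch *sw)
{
	/* Wait up to 500 ms for bit 0 of config space dword 6 to become 1 */
	return tb_switch_wait_for_bit(sw, 6, BIT(0), BIT(0), 500);
}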
1644
1645/*
1646 * tb_plug_events_active() - enable/disable plug events on a switch
1647 *
1648 * Also configures a sane plug_events_delay of 255ms.
1649 *
1650 * Return: Returns 0 on success or an error code on failure.
1651 */
1652static int tb_plug_events_active(struct tb_switch *sw, bool active)
1653{
1654	u32 data;
1655	int res;
1656
1657	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
1658		return 0;
1659
1660	sw->config.plug_events_delay = 0xff;
1661	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1662	if (res)
1663		return res;
1664
1665	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1666	if (res)
1667		return res;
1668
1669	if (active) {
1670		data = data & 0xFFFFFF83;
1671		switch (sw->config.device_id) {
1672		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1673		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1674		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1675			break;
1676		default:
1677			/*
1678			 * Skip Alpine Ridge, it needs to have vendor
1679			 * specific USB hotplug event enabled for the
1680			 * internal xHCI to work.
1681			 */
1682			if (!tb_switch_is_alpine_ridge(sw))
1683				data |= TB_PLUG_EVENTS_USB_DISABLE;
1684		}
1685	} else {
1686		data = data | 0x7c;
1687	}
1688	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1689			   sw->cap_plug_events + 1, 1);
1690}
1691
1692static ssize_t authorized_show(struct device *dev,
1693			       struct device_attribute *attr,
1694			       char *buf)
1695{
1696	struct tb_switch *sw = tb_to_switch(dev);
1697
1698	return sysfs_emit(buf, "%u\n", sw->authorized);
1699}
1700
1701static int disapprove_switch(struct device *dev, void *not_used)
1702{
1703	char *envp[] = { "AUTHORIZED=0", NULL };
1704	struct tb_switch *sw;
1705
1706	sw = tb_to_switch(dev);
1707	if (sw && sw->authorized) {
1708		int ret;
1709
1710		/* First children */
1711		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
1712		if (ret)
1713			return ret;
1714
1715		ret = tb_domain_disapprove_switch(sw->tb, sw);
1716		if (ret)
1717			return ret;
1718
1719		sw->authorized = 0;
1720		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1721	}
1722
1723	return 0;
1724}
1725
1726static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1727{
1728	char envp_string[13];
1729	int ret = -EINVAL;
1730	char *envp[] = { envp_string, NULL };
1731
1732	if (!mutex_trylock(&sw->tb->lock))
1733		return restart_syscall();
1734
1735	if (!!sw->authorized == !!val)
1736		goto unlock;
1737
1738	switch (val) {
1739	/* Disapprove switch */
1740	case 0:
1741		if (tb_route(sw)) {
1742			ret = disapprove_switch(&sw->dev, NULL);
1743			goto unlock;
1744		}
1745		break;
1746
1747	/* Approve switch */
1748	case 1:
1749		if (sw->key)
1750			ret = tb_domain_approve_switch_key(sw->tb, sw);
1751		else
1752			ret = tb_domain_approve_switch(sw->tb, sw);
1753		break;
1754
1755	/* Challenge switch */
1756	case 2:
1757		if (sw->key)
1758			ret = tb_domain_challenge_switch_key(sw->tb, sw);
1759		break;
1760
1761	default:
1762		break;
1763	}
1764
1765	if (!ret) {
1766		sw->authorized = val;
1767		/*
 1768		 * Notify userspace of the status change, informing it of the
 1769		 * new value of /sys/bus/thunderbolt/devices/.../authorized.
1770		 */
1771		sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1772		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1773	}
1774
1775unlock:
1776	mutex_unlock(&sw->tb->lock);
1777	return ret;
1778}
1779
1780static ssize_t authorized_store(struct device *dev,
1781				struct device_attribute *attr,
1782				const char *buf, size_t count)
1783{
1784	struct tb_switch *sw = tb_to_switch(dev);
1785	unsigned int val;
1786	ssize_t ret;
1787
1788	ret = kstrtouint(buf, 0, &val);
1789	if (ret)
1790		return ret;
1791	if (val > 2)
1792		return -EINVAL;
1793
1794	pm_runtime_get_sync(&sw->dev);
1795	ret = tb_switch_set_authorized(sw, val);
1796	pm_runtime_mark_last_busy(&sw->dev);
1797	pm_runtime_put_autosuspend(&sw->dev);
1798
1799	return ret ? ret : count;
1800}
1801static DEVICE_ATTR_RW(authorized);
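
/*
 * Illustrative note (not part of the original file): per
 * tb_switch_set_authorized() above, userspace drives authorization by
 * writing to this attribute:
 *
 *   echo 0 > /sys/bus/thunderbolt/devices/<device>/authorized  # disapprove
 *   echo 1 > /sys/bus/thunderbolt/devices/<device>/authorized  # approve
 *   echo 2 > /sys/bus/thunderbolt/devices/<device>/authorized  # challenge key
 */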
1802
1803static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1804			 char *buf)
1805{
1806	struct tb_switch *sw = tb_to_switch(dev);
1807
1808	return sysfs_emit(buf, "%u\n", sw->boot);
1809}
1810static DEVICE_ATTR_RO(boot);
1811
1812static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1813			   char *buf)
1814{
1815	struct tb_switch *sw = tb_to_switch(dev);
1816
1817	return sysfs_emit(buf, "%#x\n", sw->device);
1818}
1819static DEVICE_ATTR_RO(device);
1820
1821static ssize_t
1822device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1823{
1824	struct tb_switch *sw = tb_to_switch(dev);
1825
1826	return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
1827}
1828static DEVICE_ATTR_RO(device_name);
1829
1830static ssize_t
1831generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1832{
1833	struct tb_switch *sw = tb_to_switch(dev);
1834
1835	return sysfs_emit(buf, "%u\n", sw->generation);
1836}
1837static DEVICE_ATTR_RO(generation);
1838
1839static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1840			char *buf)
1841{
1842	struct tb_switch *sw = tb_to_switch(dev);
1843	ssize_t ret;
1844
1845	if (!mutex_trylock(&sw->tb->lock))
1846		return restart_syscall();
1847
1848	if (sw->key)
1849		ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1850	else
1851		ret = sysfs_emit(buf, "\n");
1852
1853	mutex_unlock(&sw->tb->lock);
1854	return ret;
1855}
1856
1857static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1858			 const char *buf, size_t count)
1859{
1860	struct tb_switch *sw = tb_to_switch(dev);
1861	u8 key[TB_SWITCH_KEY_SIZE];
1862	ssize_t ret = count;
1863	bool clear = false;
1864
1865	if (!strcmp(buf, "\n"))
1866		clear = true;
1867	else if (hex2bin(key, buf, sizeof(key)))
1868		return -EINVAL;
1869
1870	if (!mutex_trylock(&sw->tb->lock))
1871		return restart_syscall();
1872
1873	if (sw->authorized) {
1874		ret = -EBUSY;
1875	} else {
1876		kfree(sw->key);
1877		if (clear) {
1878			sw->key = NULL;
1879		} else {
1880			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1881			if (!sw->key)
1882				ret = -ENOMEM;
1883		}
1884	}
1885
1886	mutex_unlock(&sw->tb->lock);
1887	return ret;
1888}
1889static DEVICE_ATTR(key, 0600, key_show, key_store);
1890
1891static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1892			  char *buf)
1893{
1894	struct tb_switch *sw = tb_to_switch(dev);
1895
1896	return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
1897}
1898
1899/*
 1900 * Currently all lanes must run at the same speed but we expose both
 1901 * directions here to allow possible asymmetric links in the future.
1902 */
1903static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1904static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1905
1906static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1907			  char *buf)
1908{
1909	struct tb_switch *sw = tb_to_switch(dev);
1910
1911	return sysfs_emit(buf, "%u\n", sw->link_width);
1912}
1913
1914/*
 1915 * Currently the link has the same number of lanes in both directions
 1916 * (1 or 2) but we expose them separately to allow future asymmetric links.
1917 */
1918static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1919static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1920
1921static ssize_t nvm_authenticate_show(struct device *dev,
1922	struct device_attribute *attr, char *buf)
1923{
1924	struct tb_switch *sw = tb_to_switch(dev);
1925	u32 status;
1926
1927	nvm_get_auth_status(sw, &status);
1928	return sysfs_emit(buf, "%#x\n", status);
1929}
1930
1931static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1932				      bool disconnect)
1933{
1934	struct tb_switch *sw = tb_to_switch(dev);
1935	int val, ret;
1936
1937	pm_runtime_get_sync(&sw->dev);
1938
1939	if (!mutex_trylock(&sw->tb->lock)) {
1940		ret = restart_syscall();
1941		goto exit_rpm;
1942	}
1943
1944	if (sw->no_nvm_upgrade) {
1945		ret = -EOPNOTSUPP;
1946		goto exit_unlock;
1947	}
1948
1949	/* If NVMem devices are not yet added */
1950	if (!sw->nvm) {
1951		ret = -EAGAIN;
1952		goto exit_unlock;
1953	}
1954
1955	ret = kstrtoint(buf, 10, &val);
1956	if (ret)
1957		goto exit_unlock;
1958
1959	/* Always clear the authentication status */
1960	nvm_clear_auth_status(sw);
1961
1962	if (val > 0) {
1963		if (val == AUTHENTICATE_ONLY) {
1964			if (disconnect)
1965				ret = -EINVAL;
1966			else
1967				ret = nvm_authenticate(sw, true);
1968		} else {
1969			if (!sw->nvm->flushed) {
1970				if (!sw->nvm->buf) {
1971					ret = -EINVAL;
1972					goto exit_unlock;
1973				}
1974
1975				ret = nvm_validate_and_write(sw);
1976				if (ret || val == WRITE_ONLY)
1977					goto exit_unlock;
1978			}
1979			if (val == WRITE_AND_AUTHENTICATE) {
1980				if (disconnect)
1981					ret = tb_lc_force_power(sw);
1982				else
1983					ret = nvm_authenticate(sw, false);
1984			}
1985		}
1986	}
1987
1988exit_unlock:
1989	mutex_unlock(&sw->tb->lock);
1990exit_rpm:
1991	pm_runtime_mark_last_busy(&sw->dev);
1992	pm_runtime_put_autosuspend(&sw->dev);
1993
1994	return ret;
1995}
1996
1997static ssize_t nvm_authenticate_store(struct device *dev,
1998	struct device_attribute *attr, const char *buf, size_t count)
1999{
2000	int ret = nvm_authenticate_sysfs(dev, buf, false);
2001	if (ret)
2002		return ret;
2003	return count;
2004}
2005static DEVICE_ATTR_RW(nvm_authenticate);
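
/*
 * Illustrative note (not part of the original file): the values handled
 * by nvm_authenticate_sysfs() above are WRITE_AND_AUTHENTICATE,
 * WRITE_ONLY and AUTHENTICATE_ONLY (defined in tb.h). A firmware
 * upgrade from userspace therefore typically looks roughly like:
 *
 *   cat image.bin > /sys/bus/thunderbolt/devices/<device>/nvm_non_active0/nvmem
 *   echo 1 > /sys/bus/thunderbolt/devices/<device>/nvm_authenticate
 */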
2006
2007static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
2008	struct device_attribute *attr, char *buf)
2009{
2010	return nvm_authenticate_show(dev, attr, buf);
2011}
2012
2013static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
2014	struct device_attribute *attr, const char *buf, size_t count)
2015{
2016	int ret;
2017
2018	ret = nvm_authenticate_sysfs(dev, buf, true);
2019	return ret ? ret : count;
2020}
2021static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
2022
2023static ssize_t nvm_version_show(struct device *dev,
2024				struct device_attribute *attr, char *buf)
2025{
2026	struct tb_switch *sw = tb_to_switch(dev);
2027	int ret;
2028
2029	if (!mutex_trylock(&sw->tb->lock))
2030		return restart_syscall();
2031
2032	if (sw->safe_mode)
2033		ret = -ENODATA;
2034	else if (!sw->nvm)
2035		ret = -EAGAIN;
2036	else
2037		ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
2038
2039	mutex_unlock(&sw->tb->lock);
2040
2041	return ret;
2042}
2043static DEVICE_ATTR_RO(nvm_version);
2044
2045static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
2046			   char *buf)
2047{
2048	struct tb_switch *sw = tb_to_switch(dev);
2049
2050	return sysfs_emit(buf, "%#x\n", sw->vendor);
2051}
2052static DEVICE_ATTR_RO(vendor);
2053
2054static ssize_t
2055vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
2056{
2057	struct tb_switch *sw = tb_to_switch(dev);
2058
2059	return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
2060}
2061static DEVICE_ATTR_RO(vendor_name);
2062
2063static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
2064			      char *buf)
2065{
2066	struct tb_switch *sw = tb_to_switch(dev);
2067
2068	return sysfs_emit(buf, "%pUb\n", sw->uuid);
2069}
2070static DEVICE_ATTR_RO(unique_id);
2071
2072static struct attribute *switch_attrs[] = {
2073	&dev_attr_authorized.attr,
2074	&dev_attr_boot.attr,
2075	&dev_attr_device.attr,
2076	&dev_attr_device_name.attr,
2077	&dev_attr_generation.attr,
2078	&dev_attr_key.attr,
2079	&dev_attr_nvm_authenticate.attr,
2080	&dev_attr_nvm_authenticate_on_disconnect.attr,
2081	&dev_attr_nvm_version.attr,
2082	&dev_attr_rx_speed.attr,
2083	&dev_attr_rx_lanes.attr,
2084	&dev_attr_tx_speed.attr,
2085	&dev_attr_tx_lanes.attr,
2086	&dev_attr_vendor.attr,
2087	&dev_attr_vendor_name.attr,
2088	&dev_attr_unique_id.attr,
2089	NULL,
2090};
2091
2092static umode_t switch_attr_is_visible(struct kobject *kobj,
2093				      struct attribute *attr, int n)
2094{
2095	struct device *dev = kobj_to_dev(kobj);
2096	struct tb_switch *sw = tb_to_switch(dev);
2097
2098	if (attr == &dev_attr_authorized.attr) {
2099		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
2100		    sw->tb->security_level == TB_SECURITY_DPONLY)
2101			return 0;
2102	} else if (attr == &dev_attr_device.attr) {
2103		if (!sw->device)
2104			return 0;
2105	} else if (attr == &dev_attr_device_name.attr) {
2106		if (!sw->device_name)
2107			return 0;
2108	} else if (attr == &dev_attr_vendor.attr) {
2109		if (!sw->vendor)
2110			return 0;
2111	} else if (attr == &dev_attr_vendor_name.attr) {
2112		if (!sw->vendor_name)
2113			return 0;
2114	} else if (attr == &dev_attr_key.attr) {
2115		if (tb_route(sw) &&
2116		    sw->tb->security_level == TB_SECURITY_SECURE &&
2117		    sw->security_level == TB_SECURITY_SECURE)
2118			return attr->mode;
2119		return 0;
2120	} else if (attr == &dev_attr_rx_speed.attr ||
2121		   attr == &dev_attr_rx_lanes.attr ||
2122		   attr == &dev_attr_tx_speed.attr ||
2123		   attr == &dev_attr_tx_lanes.attr) {
2124		if (tb_route(sw))
2125			return attr->mode;
2126		return 0;
2127	} else if (attr == &dev_attr_nvm_authenticate.attr) {
2128		if (nvm_upgradeable(sw))
2129			return attr->mode;
2130		return 0;
2131	} else if (attr == &dev_attr_nvm_version.attr) {
2132		if (nvm_readable(sw))
2133			return attr->mode;
2134		return 0;
2135	} else if (attr == &dev_attr_boot.attr) {
2136		if (tb_route(sw))
2137			return attr->mode;
2138		return 0;
2139	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
2140		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
2141			return attr->mode;
2142		return 0;
2143	}
2144
2145	return sw->safe_mode ? 0 : attr->mode;
2146}
2147
2148static const struct attribute_group switch_group = {
2149	.is_visible = switch_attr_is_visible,
2150	.attrs = switch_attrs,
2151};
2152
2153static const struct attribute_group *switch_groups[] = {
2154	&switch_group,
2155	NULL,
2156};
2157
2158static void tb_switch_release(struct device *dev)
2159{
2160	struct tb_switch *sw = tb_to_switch(dev);
2161	struct tb_port *port;
2162
2163	dma_port_free(sw->dma_port);
2164
2165	tb_switch_for_each_port(sw, port) {
2166		ida_destroy(&port->in_hopids);
2167		ida_destroy(&port->out_hopids);
2168	}
2169
2170	kfree(sw->uuid);
2171	kfree(sw->device_name);
2172	kfree(sw->vendor_name);
2173	kfree(sw->ports);
2174	kfree(sw->drom);
2175	kfree(sw->key);
2176	kfree(sw);
2177}
2178
2179static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
2180{
2181	struct tb_switch *sw = tb_to_switch(dev);
2182	const char *type;
2183
2184	if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
2185		if (add_uevent_var(env, "USB4_VERSION=1.0"))
2186			return -ENOMEM;
2187	}
2188
2189	if (!tb_route(sw)) {
2190		type = "host";
2191	} else {
2192		const struct tb_port *port;
2193		bool hub = false;
2194
2195		/* Device is a hub if it has any downstream ports */
2196		tb_switch_for_each_port(sw, port) {
2197			if (!port->disabled && !tb_is_upstream_port(port) &&
2198			     tb_port_is_null(port)) {
2199				hub = true;
2200				break;
2201			}
2202		}
2203
2204		type = hub ? "hub" : "device";
2205	}
2206
2207	if (add_uevent_var(env, "USB4_TYPE=%s", type))
2208		return -ENOMEM;
2209	return 0;
2210}
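
/*
 * Illustrative examples of the environment variables the uevent
 * callback above adds (the exact values depend on the topology):
 *
 *	USB4_VERSION=1.0	only for USB4 v1.0 routers
 *	USB4_TYPE=host		host router (no route string)
 *	USB4_TYPE=hub		device router with enabled downstream
 *				null ports
 *	USB4_TYPE=device	device router without such ports
 */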
2211
2212/*
2213 * Currently we only need to provide the callbacks. Everything else is
2214 * handled by the connection manager.
2215 */
2216static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2217{
2218	struct tb_switch *sw = tb_to_switch(dev);
2219	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2220
2221	if (cm_ops->runtime_suspend_switch)
2222		return cm_ops->runtime_suspend_switch(sw);
2223
2224	return 0;
2225}
2226
2227static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2228{
2229	struct tb_switch *sw = tb_to_switch(dev);
2230	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2231
2232	if (cm_ops->runtime_resume_switch)
2233		return cm_ops->runtime_resume_switch(sw);
2234	return 0;
2235}
2236
2237static const struct dev_pm_ops tb_switch_pm_ops = {
2238	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2239			   NULL)
2240};
2241
2242struct device_type tb_switch_type = {
2243	.name = "thunderbolt_device",
2244	.release = tb_switch_release,
2245	.uevent = tb_switch_uevent,
2246	.pm = &tb_switch_pm_ops,
2247};
2248
2249static int tb_switch_get_generation(struct tb_switch *sw)
2250{
2251	switch (sw->config.device_id) {
2252	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2253	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2254	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2255	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2256	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2257	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2258	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2259	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2260		return 1;
2261
2262	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2263	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2264	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2265		return 2;
2266
2267	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2268	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2269	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2270	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2271	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2272	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2273	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2274	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2275	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2276	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2277		return 3;
2278
2279	default:
2280		if (tb_switch_is_usb4(sw))
2281			return 4;
2282
2283		/*
2284		 * For unknown switches assume the generation to be 1 to
2285		 * stay on the safe side.
2286		 */
2287		tb_sw_warn(sw, "unsupported switch device id %#x\n",
2288			   sw->config.device_id);
2289		return 1;
2290	}
2291}
2292
2293static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2294{
2295	int max_depth;
2296
2297	if (tb_switch_is_usb4(sw) ||
2298	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2299		max_depth = USB4_SWITCH_MAX_DEPTH;
2300	else
2301		max_depth = TB_SWITCH_MAX_DEPTH;
2302
2303	return depth > max_depth;
2304}
2305
2306/**
2307 * tb_switch_alloc() - allocate a switch
2308 * @tb: Pointer to the owning domain
2309 * @parent: Parent device for this switch
2310 * @route: Route string for this switch
2311 *
2312 * Allocates and initializes a switch. Will not upload configuration to
2313 * the switch. For that you need to call tb_switch_configure()
2314 * separately. The returned switch should be released by calling
2315 * tb_switch_put().
2316 *
2317 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2318 * failure.
2319 */
2320struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2321				  u64 route)
2322{
2323	struct tb_switch *sw;
2324	int upstream_port;
2325	int i, ret, depth;
2326
2327	/* Unlock the downstream port so we can access the switch below */
2328	if (route) {
2329		struct tb_switch *parent_sw = tb_to_switch(parent);
2330		struct tb_port *down;
2331
2332		down = tb_port_at(route, parent_sw);
2333		tb_port_unlock(down);
2334	}
2335
2336	depth = tb_route_length(route);
2337
2338	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2339	if (upstream_port < 0)
2340		return ERR_PTR(upstream_port);
2341
2342	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2343	if (!sw)
2344		return ERR_PTR(-ENOMEM);
2345
2346	sw->tb = tb;
2347	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2348	if (ret)
2349		goto err_free_sw_ports;
2350
2351	sw->generation = tb_switch_get_generation(sw);
2352
2353	tb_dbg(tb, "current switch config:\n");
2354	tb_dump_switch(tb, sw);
2355
2356	/* configure switch */
2357	sw->config.upstream_port_number = upstream_port;
2358	sw->config.depth = depth;
2359	sw->config.route_hi = upper_32_bits(route);
2360	sw->config.route_lo = lower_32_bits(route);
2361	sw->config.enabled = 0;
2362
2363	/* Make sure we do not exceed maximum topology limit */
2364	if (tb_switch_exceeds_max_depth(sw, depth)) {
2365		ret = -EADDRNOTAVAIL;
2366		goto err_free_sw_ports;
2367	}
2368
2369	/* initialize ports */
2370	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2371				GFP_KERNEL);
2372	if (!sw->ports) {
2373		ret = -ENOMEM;
2374		goto err_free_sw_ports;
2375	}
2376
2377	for (i = 0; i <= sw->config.max_port_number; i++) {
2378		/* minimum setup for tb_find_cap and tb_drom_read to work */
2379		sw->ports[i].sw = sw;
2380		sw->ports[i].port = i;
2381
2382		/* Control port does not need HopID allocation */
2383		if (i) {
2384			ida_init(&sw->ports[i].in_hopids);
2385			ida_init(&sw->ports[i].out_hopids);
2386		}
2387	}
2388
2389	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2390	if (ret > 0)
2391		sw->cap_plug_events = ret;
2392
2393	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2394	if (ret > 0)
2395		sw->cap_vsec_tmu = ret;
2396
2397	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2398	if (ret > 0)
2399		sw->cap_lc = ret;
2400
2401	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2402	if (ret > 0)
2403		sw->cap_lp = ret;
2404
2405	/* Root switch is always authorized */
2406	if (!route)
2407		sw->authorized = true;
2408
2409	device_initialize(&sw->dev);
2410	sw->dev.parent = parent;
2411	sw->dev.bus = &tb_bus_type;
2412	sw->dev.type = &tb_switch_type;
2413	sw->dev.groups = switch_groups;
2414	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2415
2416	return sw;
2417
2418err_free_sw_ports:
2419	kfree(sw->ports);
2420	kfree(sw);
2421
2422	return ERR_PTR(ret);
2423}
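
/*
 * A minimal sketch (not part of the driver) of how a connection manager
 * typically pairs tb_switch_alloc() with tb_switch_configure() and
 * tb_switch_add() described below, dropping the reference on failure.
 * Locking and topology bookkeeping are omitted:
 *
 *	struct tb_switch *sw;
 *	int ret;
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);
 *	if (!ret)
 *		ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);
 *	return ret;
 */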
2424
2425/**
2426 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2427 * @tb: Pointer to the owning domain
2428 * @parent: Parent device for this switch
2429 * @route: Route string for this switch
2430 *
2431 * This creates a switch in safe mode. This means the switch pretty much
2432 * lacks all capabilities except the DMA configuration port until it has
2433 * been flashed with a valid NVM image.
2434 *
2435 * The returned switch must be released by calling tb_switch_put().
2436 *
2437 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2438 */
2439struct tb_switch *
2440tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2441{
2442	struct tb_switch *sw;
2443
2444	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2445	if (!sw)
2446		return ERR_PTR(-ENOMEM);
2447
2448	sw->tb = tb;
2449	sw->config.depth = tb_route_length(route);
2450	sw->config.route_hi = upper_32_bits(route);
2451	sw->config.route_lo = lower_32_bits(route);
2452	sw->safe_mode = true;
2453
2454	device_initialize(&sw->dev);
2455	sw->dev.parent = parent;
2456	sw->dev.bus = &tb_bus_type;
2457	sw->dev.type = &tb_switch_type;
2458	sw->dev.groups = switch_groups;
2459	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2460
2461	return sw;
2462}
2463
2464/**
2465 * tb_switch_configure() - Uploads configuration to the switch
2466 * @sw: Switch to configure
2467 *
2468 * Call this function before the switch is added to the system. It uploads
2469 * the configuration to the switch and makes it available for the
2470 * connection manager to use. Can be called for the switch again after
2471 * resume from low power states to re-initialize it.
2472 *
2473 * Return: %0 in case of success and negative errno in case of failure
2474 */
2475int tb_switch_configure(struct tb_switch *sw)
2476{
2477	struct tb *tb = sw->tb;
2478	u64 route;
2479	int ret;
2480
2481	route = tb_route(sw);
2482
2483	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2484	       sw->config.enabled ? "restoring" : "initializing", route,
2485	       tb_route_length(route), sw->config.upstream_port_number);
2486
2487	sw->config.enabled = 1;
2488
2489	if (tb_switch_is_usb4(sw)) {
2490		/*
2491		 * For USB4 devices, we need to program the CM version
2492		 * accordingly so that the router knows to expose all the
2493		 * additional capabilities.
2494		 */
2495		sw->config.cmuv = USB4_VERSION_1_0;
2496		sw->config.plug_events_delay = 0xa;
2497
2498		/* Enumerate the switch */
2499		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2500				  ROUTER_CS_1, 4);
2501		if (ret)
2502			return ret;
2503
2504		ret = usb4_switch_setup(sw);
2505	} else {
2506		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2507			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2508				   sw->config.vendor_id);
2509
2510		if (!sw->cap_plug_events) {
2511			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
2512			return -ENODEV;
2513		}
2514
2515		/* Enumerate the switch */
2516		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2517				  ROUTER_CS_1, 3);
2518	}
2519	if (ret)
2520		return ret;
2521
2522	return tb_plug_events_active(sw, true);
2523}
2524
2525static int tb_switch_set_uuid(struct tb_switch *sw)
2526{
2527	bool uid = false;
2528	u32 uuid[4];
2529	int ret;
2530
2531	if (sw->uuid)
2532		return 0;
2533
2534	if (tb_switch_is_usb4(sw)) {
2535		ret = usb4_switch_read_uid(sw, &sw->uid);
2536		if (ret)
2537			return ret;
2538		uid = true;
2539	} else {
2540		/*
2541		 * The newer controllers include a fused UUID as part of
2542		 * the link controller specific registers.
2543		 */
2544		ret = tb_lc_read_uuid(sw, uuid);
2545		if (ret) {
2546			if (ret != -EINVAL)
2547				return ret;
2548			uid = true;
2549		}
2550	}
2551
2552	if (uid) {
2553		/*
2554		 * ICM generates the UUID based on the UID and fills the
2555		 * upper two words with ones. This does not strictly follow
2556		 * the UUID format but we want to be compatible with it so
2557		 * we do the same here.
2558		 */
2559		uuid[0] = sw->uid & 0xffffffff;
2560		uuid[1] = (sw->uid >> 32) & 0xffffffff;
2561		uuid[2] = 0xffffffff;
2562		uuid[3] = 0xffffffff;
2563	}
2564
2565	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2566	if (!sw->uuid)
2567		return -ENOMEM;
2568	return 0;
2569}
2570
2571static int tb_switch_add_dma_port(struct tb_switch *sw)
2572{
2573	u32 status;
2574	int ret;
2575
2576	switch (sw->generation) {
2577	case 2:
2578		/* Only root switch can be upgraded */
2579		if (tb_route(sw))
2580			return 0;
2581
2582		fallthrough;
2583	case 3:
2584	case 4:
2585		ret = tb_switch_set_uuid(sw);
2586		if (ret)
2587			return ret;
2588		break;
2589
2590	default:
2591		/*
2592		 * DMA port is the only thing available when the switch
2593		 * is in safe mode.
2594		 */
2595		if (!sw->safe_mode)
2596			return 0;
2597		break;
2598	}
2599
2600	if (sw->no_nvm_upgrade)
2601		return 0;
2602
2603	if (tb_switch_is_usb4(sw)) {
2604		ret = usb4_switch_nvm_authenticate_status(sw, &status);
2605		if (ret)
2606			return ret;
2607
2608		if (status) {
2609			tb_sw_info(sw, "switch flash authentication failed\n");
2610			nvm_set_auth_status(sw, status);
2611		}
2612
2613		return 0;
2614	}
2615
2616	/* Root switch DMA port requires running firmware */
2617	if (!tb_route(sw) && !tb_switch_is_icm(sw))
2618		return 0;
2619
2620	sw->dma_port = dma_port_alloc(sw);
2621	if (!sw->dma_port)
2622		return 0;
2623
2624	/*
2625	 * If there is a status already set then authentication failed
2626	 * when dma_port_flash_update_auth() returned. Power cycling
2627	 * is not needed (it was done already) so the only thing we do
2628	 * here is to unblock runtime PM of the root port.
2629	 */
2630	nvm_get_auth_status(sw, &status);
2631	if (status) {
2632		if (!tb_route(sw))
2633			nvm_authenticate_complete_dma_port(sw);
2634		return 0;
2635	}
2636
2637	/*
2638	 * Check status of the previous flash authentication. If there
2639	 * is one we need to power cycle the switch in any case to make
2640	 * it functional again.
2641	 */
2642	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2643	if (ret <= 0)
2644		return ret;
2645
2646	/* Now we can allow root port to suspend again */
2647	if (!tb_route(sw))
2648		nvm_authenticate_complete_dma_port(sw);
2649
2650	if (status) {
2651		tb_sw_info(sw, "switch flash authentication failed\n");
2652		nvm_set_auth_status(sw, status);
2653	}
2654
2655	tb_sw_info(sw, "power cycling the switch now\n");
2656	dma_port_power_cycle(sw->dma_port);
2657
2658	/*
2659	 * We return an error here, which causes adding the switch to fail.
2660	 * It should appear back after the power cycle is complete.
2661	 */
2662	return -ESHUTDOWN;
2663}
2664
2665static void tb_switch_default_link_ports(struct tb_switch *sw)
2666{
2667	int i;
2668
2669	for (i = 1; i <= sw->config.max_port_number; i++) {
2670		struct tb_port *port = &sw->ports[i];
2671		struct tb_port *subordinate;
2672
2673		if (!tb_port_is_null(port))
2674			continue;
2675
2676		/* Check for the subordinate port */
2677		if (i == sw->config.max_port_number ||
2678		    !tb_port_is_null(&sw->ports[i + 1]))
2679			continue;
2680
2681		/* Link them if not already done so (by DROM) */
2682		subordinate = &sw->ports[i + 1];
2683		if (!port->dual_link_port && !subordinate->dual_link_port) {
2684			port->link_nr = 0;
2685			port->dual_link_port = subordinate;
2686			subordinate->link_nr = 1;
2687			subordinate->dual_link_port = port;
2688
2689			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2690				  port->port, subordinate->port);
2691		}
2692	}
2693}
2694
2695static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2696{
2697	const struct tb_port *up = tb_upstream_port(sw);
2698
2699	if (!up->dual_link_port || !up->dual_link_port->remote)
2700		return false;
2701
2702	if (tb_switch_is_usb4(sw))
2703		return usb4_switch_lane_bonding_possible(sw);
2704	return tb_lc_lane_bonding_possible(sw);
2705}
2706
2707static int tb_switch_update_link_attributes(struct tb_switch *sw)
2708{
2709	struct tb_port *up;
2710	bool change = false;
2711	int ret;
2712
2713	if (!tb_route(sw) || tb_switch_is_icm(sw))
2714		return 0;
2715
2716	up = tb_upstream_port(sw);
2717
2718	ret = tb_port_get_link_speed(up);
2719	if (ret < 0)
2720		return ret;
2721	if (sw->link_speed != ret)
2722		change = true;
2723	sw->link_speed = ret;
2724
2725	ret = tb_port_get_link_width(up);
2726	if (ret < 0)
2727		return ret;
2728	if (sw->link_width != ret)
2729		change = true;
2730	sw->link_width = ret;
2731
2732	/* Notify userspace that there is a possible link attribute change */
2733	if (device_is_registered(&sw->dev) && change)
2734		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2735
2736	return 0;
2737}
2738
2739/**
2740 * tb_switch_lane_bonding_enable() - Enable lane bonding
2741 * @sw: Switch to enable lane bonding
2742 *
2743 * The connection manager can call this function to enable lane bonding of a
2744 * switch. If the conditions are correct and both switches support the
2745 * feature, the lanes are bonded. It is safe to call this for any switch.
2746 */
2747int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2748{
2749	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2750	struct tb_port *up, *down;
2751	u64 route = tb_route(sw);
2752	int ret;
2753
2754	if (!route)
2755		return 0;
2756
2757	if (!tb_switch_lane_bonding_possible(sw))
2758		return 0;
2759
2760	up = tb_upstream_port(sw);
2761	down = tb_port_at(route, parent);
2762
2763	if (!tb_port_is_width_supported(up, 2) ||
2764	    !tb_port_is_width_supported(down, 2))
2765		return 0;
2766
2767	ret = tb_port_lane_bonding_enable(up);
2768	if (ret) {
2769		tb_port_warn(up, "failed to enable lane bonding\n");
2770		return ret;
2771	}
2772
2773	ret = tb_port_lane_bonding_enable(down);
2774	if (ret) {
2775		tb_port_warn(down, "failed to enable lane bonding\n");
2776		tb_port_lane_bonding_disable(up);
2777		return ret;
2778	}
2779
2780	ret = tb_port_wait_for_link_width(down, 2, 100);
2781	if (ret) {
2782		tb_port_warn(down, "timeout enabling lane bonding\n");
2783		return ret;
2784	}
2785
2786	tb_port_update_credits(down);
2787	tb_port_update_credits(up);
2788	tb_switch_update_link_attributes(sw);
2789
2790	tb_sw_dbg(sw, "lane bonding enabled\n");
2791	return ret;
2792}
2793
2794/**
2795 * tb_switch_lane_bonding_disable() - Disable lane bonding
2796 * @sw: Switch whose lane bonding to disable
2797 *
2798 * Disables lane bonding between @sw and its parent. This can be called
2799 * even if the lanes were not bonded originally.
2800 */
2801void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2802{
2803	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2804	struct tb_port *up, *down;
2805
2806	if (!tb_route(sw))
2807		return;
2808
2809	up = tb_upstream_port(sw);
2810	if (!up->bonded)
2811		return;
2812
2813	down = tb_port_at(tb_route(sw), parent);
2814
2815	tb_port_lane_bonding_disable(up);
2816	tb_port_lane_bonding_disable(down);
2817
2818	/*
2819	 * It is fine if we get other errors as the router might have
2820	 * been unplugged.
2821	 */
2822	if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
2823		tb_sw_warn(sw, "timeout disabling lane bonding\n");
2824
2825	tb_port_update_credits(down);
2826	tb_port_update_credits(up);
2827	tb_switch_update_link_attributes(sw);
2828
2829	tb_sw_dbg(sw, "lane bonding disabled\n");
2830}
2831
2832/**
2833 * tb_switch_configure_link() - Set link configured
2834 * @sw: Switch whose link is configured
2835 *
2836 * Sets the link upstream from @sw configured (from both ends) so that
2837 * it will not be disconnected when the domain exits sleep. Can be
2838 * called for any switch.
2839 *
2840 * It is recommended that this is called after lane bonding is enabled.
2841 *
2842 * Returns %0 on success and negative errno in case of error.
2843 */
2844int tb_switch_configure_link(struct tb_switch *sw)
2845{
2846	struct tb_port *up, *down;
2847	int ret;
2848
2849	if (!tb_route(sw) || tb_switch_is_icm(sw))
2850		return 0;
2851
2852	up = tb_upstream_port(sw);
2853	if (tb_switch_is_usb4(up->sw))
2854		ret = usb4_port_configure(up);
2855	else
2856		ret = tb_lc_configure_port(up);
2857	if (ret)
2858		return ret;
2859
2860	down = up->remote;
2861	if (tb_switch_is_usb4(down->sw))
2862		return usb4_port_configure(down);
2863	return tb_lc_configure_port(down);
2864}
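
/*
 * Sketch of the recommended ordering when bringing up a newly found
 * router, per the recommendation above (similar to what the software
 * connection manager does; the warnings are illustrative):
 *
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *
 *	if (tb_switch_configure_link(sw))
 *		tb_sw_warn(sw, "failed to configure link\n");
 */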
2865
2866/**
2867 * tb_switch_unconfigure_link() - Unconfigure link
2868 * @sw: Switch whose link is unconfigured
2869 *
2870 * Sets the link unconfigured so that @sw will be disconnected if the
2871 * domain exits sleep.
2872 */
2873void tb_switch_unconfigure_link(struct tb_switch *sw)
2874{
2875	struct tb_port *up, *down;
2876
2877	if (sw->is_unplugged)
2878		return;
2879	if (!tb_route(sw) || tb_switch_is_icm(sw))
2880		return;
2881
2882	up = tb_upstream_port(sw);
2883	if (tb_switch_is_usb4(up->sw))
2884		usb4_port_unconfigure(up);
2885	else
2886		tb_lc_unconfigure_port(up);
2887
2888	down = up->remote;
2889	if (tb_switch_is_usb4(down->sw))
2890		usb4_port_unconfigure(down);
2891	else
2892		tb_lc_unconfigure_port(down);
2893}
2894
2895static void tb_switch_credits_init(struct tb_switch *sw)
2896{
2897	if (tb_switch_is_icm(sw))
2898		return;
2899	if (!tb_switch_is_usb4(sw))
2900		return;
2901	if (usb4_switch_credits_init(sw))
2902		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
2903}
2904
2905static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
2906{
2907	struct tb_port *port;
2908
2909	if (tb_switch_is_icm(sw))
2910		return 0;
2911
2912	tb_switch_for_each_port(sw, port) {
2913		int res;
2914
2915		if (!port->cap_usb4)
2916			continue;
2917
2918		res = usb4_port_hotplug_enable(port);
2919		if (res)
2920			return res;
2921	}
2922	return 0;
2923}
2924
2925/**
2926 * tb_switch_add() - Add a switch to the domain
2927 * @sw: Switch to add
2928 *
2929 * This is the last step in adding a switch to the domain. It reads
2930 * identification information from the DROM and initializes the ports so
2931 * that they can be used to connect other switches. The switch will be
2932 * exposed to userspace when this function successfully returns. To
2933 * remove and release the switch, call tb_switch_remove().
2934 *
2935 * Return: %0 in case of success and negative errno in case of failure
2936 */
2937int tb_switch_add(struct tb_switch *sw)
2938{
2939	int i, ret;
2940
2941	/*
2942	 * Initialize the DMA control port now, before we read the DROM.
2943	 * Recent host controllers have a more complete DROM in NVM that
2944	 * includes vendor and model identification strings which we then
2945	 * expose to userspace. The NVM can be accessed through the DMA
2946	 * configuration based mailbox.
2947	 */
2948	ret = tb_switch_add_dma_port(sw);
2949	if (ret) {
2950		dev_err(&sw->dev, "failed to add DMA port\n");
2951		return ret;
2952	}
2953
2954	if (!sw->safe_mode) {
2955		tb_switch_credits_init(sw);
2956
2957		/* read drom */
2958		ret = tb_drom_read(sw);
2959		if (ret)
2960			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
2961		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2962
2963		tb_check_quirks(sw);
2964
2965		ret = tb_switch_set_uuid(sw);
2966		if (ret) {
2967			dev_err(&sw->dev, "failed to set UUID\n");
2968			return ret;
2969		}
2970
2971		for (i = 0; i <= sw->config.max_port_number; i++) {
2972			if (sw->ports[i].disabled) {
2973				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2974				continue;
2975			}
2976			ret = tb_init_port(&sw->ports[i]);
2977			if (ret) {
2978				dev_err(&sw->dev, "failed to initialize port %d\n", i);
2979				return ret;
2980			}
2981		}
2982
2983		tb_switch_default_link_ports(sw);
2984
2985		ret = tb_switch_update_link_attributes(sw);
2986		if (ret)
2987			return ret;
2988
2989		ret = tb_switch_tmu_init(sw);
2990		if (ret)
2991			return ret;
2992	}
2993
2994	ret = tb_switch_port_hotplug_enable(sw);
2995	if (ret)
2996		return ret;
2997
2998	ret = device_add(&sw->dev);
2999	if (ret) {
3000		dev_err(&sw->dev, "failed to add device: %d\n", ret);
3001		return ret;
3002	}
3003
3004	if (tb_route(sw)) {
3005		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
3006			 sw->vendor, sw->device);
3007		if (sw->vendor_name && sw->device_name)
3008			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
3009				 sw->device_name);
3010	}
3011
3012	ret = usb4_switch_add_ports(sw);
3013	if (ret) {
3014		dev_err(&sw->dev, "failed to add USB4 ports\n");
3015		goto err_del;
3016	}
3017
3018	ret = tb_switch_nvm_add(sw);
3019	if (ret) {
3020		dev_err(&sw->dev, "failed to add NVM devices\n");
3021		goto err_ports;
3022	}
3023
3024	/*
3025	 * Thunderbolt routers do not generate wakeups themselves but
3026	 * they forward wakeups from the tunneled protocols, so enable
3027	 * wakeup here.
3028	 */
3029	device_init_wakeup(&sw->dev, true);
3030
3031	pm_runtime_set_active(&sw->dev);
3032	if (sw->rpm) {
3033		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
3034		pm_runtime_use_autosuspend(&sw->dev);
3035		pm_runtime_mark_last_busy(&sw->dev);
3036		pm_runtime_enable(&sw->dev);
3037		pm_request_autosuspend(&sw->dev);
3038	}
3039
3040	tb_switch_debugfs_init(sw);
3041	return 0;
3042
3043err_ports:
3044	usb4_switch_remove_ports(sw);
3045err_del:
3046	device_del(&sw->dev);
3047
3048	return ret;
3049}
3050
3051/**
3052 * tb_switch_remove() - Remove and release a switch
3053 * @sw: Switch to remove
3054 *
3055 * This will remove the switch from the domain and release it after the
3056 * last reference to it drops to zero. If there are switches connected
3057 * below this switch, they will be removed as well.
3058 */
3059void tb_switch_remove(struct tb_switch *sw)
3060{
3061	struct tb_port *port;
3062
3063	tb_switch_debugfs_remove(sw);
3064
3065	if (sw->rpm) {
3066		pm_runtime_get_sync(&sw->dev);
3067		pm_runtime_disable(&sw->dev);
3068	}
3069
3070	/* port 0 is the switch itself and never has a remote */
3071	tb_switch_for_each_port(sw, port) {
3072		if (tb_port_has_remote(port)) {
3073			tb_switch_remove(port->remote->sw);
3074			port->remote = NULL;
3075		} else if (port->xdomain) {
3076			tb_xdomain_remove(port->xdomain);
3077			port->xdomain = NULL;
3078		}
3079
3080		/* Remove any downstream retimers */
3081		tb_retimer_remove_all(port);
3082	}
3083
3084	if (!sw->is_unplugged)
3085		tb_plug_events_active(sw, false);
3086
3087	tb_switch_nvm_remove(sw);
3088	usb4_switch_remove_ports(sw);
3089
3090	if (tb_route(sw))
3091		dev_info(&sw->dev, "device disconnected\n");
3092	device_unregister(&sw->dev);
3093}
3094
3095/**
3096 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
3097 * @sw: Router to mark unplugged
3098 */
3099void tb_sw_set_unplugged(struct tb_switch *sw)
3100{
3101	struct tb_port *port;
3102
3103	if (sw == sw->tb->root_switch) {
3104		tb_sw_WARN(sw, "cannot unplug root switch\n");
3105		return;
3106	}
3107	if (sw->is_unplugged) {
3108		tb_sw_WARN(sw, "is_unplugged already set\n");
3109		return;
3110	}
3111	sw->is_unplugged = true;
3112	tb_switch_for_each_port(sw, port) {
3113		if (tb_port_has_remote(port))
3114			tb_sw_set_unplugged(port->remote->sw);
3115		else if (port->xdomain)
3116			port->xdomain->is_unplugged = true;
3117	}
3118}
3119
3120static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
3121{
3122	if (flags)
3123		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
3124	else
3125		tb_sw_dbg(sw, "disabling wakeup\n");
3126
3127	if (tb_switch_is_usb4(sw))
3128		return usb4_switch_set_wake(sw, flags);
3129	return tb_lc_set_wake(sw, flags);
3130}
3131
3132int tb_switch_resume(struct tb_switch *sw)
3133{
3134	struct tb_port *port;
3135	int err;
3136
3137	tb_sw_dbg(sw, "resuming switch\n");
3138
3139	/*
3140	 * Check the UID of the connected switches, except for the
3141	 * root switch, which we assume cannot be removed.
3142	 */
3143	if (tb_route(sw)) {
3144		u64 uid;
3145
3146		/*
3147		 * Check first that we can still read the switch config
3148		 * space. It may be that there is now another domain
3149		 * connected.
3150		 */
3151		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
3152		if (err < 0) {
3153			tb_sw_info(sw, "switch not present anymore\n");
3154			return err;
3155		}
3156
3157		/* We don't have any way to confirm this was the same device */
3158		if (!sw->uid)
3159			return -ENODEV;
3160
3161		if (tb_switch_is_usb4(sw))
3162			err = usb4_switch_read_uid(sw, &uid);
3163		else
3164			err = tb_drom_read_uid_only(sw, &uid);
3165		if (err) {
3166			tb_sw_warn(sw, "uid read failed\n");
3167			return err;
3168		}
3169		if (sw->uid != uid) {
3170			tb_sw_info(sw,
3171				"changed while suspended (uid %#llx -> %#llx)\n",
3172				sw->uid, uid);
3173			return -ENODEV;
3174		}
3175	}
3176
3177	err = tb_switch_configure(sw);
3178	if (err)
3179		return err;
3180
3181	/* Disable wakes */
3182	tb_switch_set_wake(sw, 0);
3183
3184	err = tb_switch_tmu_init(sw);
3185	if (err)
3186		return err;
3187
3188	/* check for surviving downstream switches */
3189	tb_switch_for_each_port(sw, port) {
3190		if (!tb_port_is_null(port))
3191			continue;
3192
3193		if (!tb_port_resume(port))
3194			continue;
3195
3196		if (tb_wait_for_port(port, true) <= 0) {
3197			tb_port_warn(port,
3198				     "lost during suspend, disconnecting\n");
3199			if (tb_port_has_remote(port))
3200				tb_sw_set_unplugged(port->remote->sw);
3201			else if (port->xdomain)
3202				port->xdomain->is_unplugged = true;
3203		} else {
3204			/*
3205			 * Always unlock the port so the downstream
3206			 * switch/domain is accessible.
3207			 */
3208			if (tb_port_unlock(port))
3209				tb_port_warn(port, "failed to unlock port\n");
3210			if (port->remote && tb_switch_resume(port->remote->sw)) {
3211				tb_port_warn(port,
3212					     "lost during suspend, disconnecting\n");
3213				tb_sw_set_unplugged(port->remote->sw);
3214			}
3215		}
3216	}
3217	return 0;
3218}
3219
3220/**
3221 * tb_switch_suspend() - Put a switch to sleep
3222 * @sw: Switch to suspend
3223 * @runtime: Is this runtime suspend or system sleep
3224 *
3225 * Suspends the router and all its children. Enables wakes according to
3226 * the value of @runtime and then sets the sleep bit for the router. If
3227 * @sw is the host router, the domain is ready to go to sleep once this
3228 * function returns.
3229 */
3230void tb_switch_suspend(struct tb_switch *sw, bool runtime)
3231{
3232	unsigned int flags = 0;
3233	struct tb_port *port;
3234	int err;
3235
3236	tb_sw_dbg(sw, "suspending switch\n");
3237
3238	/*
3239	 * This is actually only needed for Titan Ridge, but for simplicity
3240	 * it can be done for USB4 devices too, as CLx is re-enabled at
3241	 * resume. CL0s and CL1 are enabled and supported together.
3242	 */
3243	if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
3244		if (tb_switch_disable_clx(sw, TB_CL1))
3245			tb_sw_warn(sw, "failed to disable %s on upstream port\n",
3246				   tb_switch_clx_name(TB_CL1));
3247	}
3248
3249	err = tb_plug_events_active(sw, false);
3250	if (err)
3251		return;
3252
3253	tb_switch_for_each_port(sw, port) {
3254		if (tb_port_has_remote(port))
3255			tb_switch_suspend(port->remote->sw, runtime);
3256	}
3257
3258	if (runtime) {
3259		/* Trigger wake when something is plugged in/out */
3260		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3261		flags |= TB_WAKE_ON_USB4;
3262		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3263	} else if (device_may_wakeup(&sw->dev)) {
3264		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3265	}
3266
3267	tb_switch_set_wake(sw, flags);
3268
3269	if (tb_switch_is_usb4(sw))
3270		usb4_switch_set_sleep(sw);
3271	else
3272		tb_lc_set_sleep(sw);
3273}
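
/*
 * Wake configuration summary for the function above:
 *
 *	runtime suspend:	wake on connect/disconnect, USB4 wake
 *				events and tunneled protocol (USB3,
 *				PCIe, DP) plug events
 *	system sleep:		wake on USB4/USB3/PCIe, but only if the
 *				device may wake the system
 *	otherwise:		all wakes disabled
 */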
3274
3275/**
3276 * tb_switch_query_dp_resource() - Query availability of DP resource
3277 * @sw: Switch whose DP resource is queried
3278 * @in: DP IN port
3279 *
3280 * Queries availability of a DP resource for DP tunneling using switch
3281 * specific means. Returns %true if the resource is available.
3282 */
3283bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3284{
3285	if (tb_switch_is_usb4(sw))
3286		return usb4_switch_query_dp_resource(sw, in);
3287	return tb_lc_dp_sink_query(sw, in);
3288}
3289
3290/**
3291 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3292 * @sw: Switch whose DP resource is allocated
3293 * @in: DP IN port
3294 *
3295 * Allocates DP resource for DP tunneling. The resource must be
3296 * available for this to succeed (see tb_switch_query_dp_resource()).
3297 * Returns %0 on success and negative errno otherwise.
3298 */
3299int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3300{
3301	int ret;
3302
3303	if (tb_switch_is_usb4(sw))
3304		ret = usb4_switch_alloc_dp_resource(sw, in);
3305	else
3306		ret = tb_lc_dp_sink_alloc(sw, in);
3307
3308	if (ret)
3309		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3310			   in->port);
3311	else
3312		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3313
3314	return ret;
3315}
3316
3317/**
3318 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3319 * @sw: Switch whose DP resource is de-allocated
3320 * @in: DP IN port
3321 *
3322 * De-allocates DP resource that was previously allocated for DP
3323 * tunneling.
3324 */
3325void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3326{
3327	int ret;
3328
3329	if (tb_switch_is_usb4(sw))
3330		ret = usb4_switch_dealloc_dp_resource(sw, in);
3331	else
3332		ret = tb_lc_dp_sink_dealloc(sw, in);
3333
3334	if (ret)
3335		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3336			   in->port);
3337	else
3338		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
3339}
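
/*
 * Sketch of the DP resource handshake built from the three helpers
 * above, roughly as done when setting up a DP tunnel ('in' stands for
 * some DP IN adapter of the router; error handling trimmed):
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;
 *
 *	ret = tb_switch_alloc_dp_resource(sw, in);
 *	if (ret)
 *		return ret;
 *
 *	... set up the DP tunnel; once it is torn down:
 *
 *	tb_switch_dealloc_dp_resource(sw, in);
 */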
3340
3341struct tb_sw_lookup {
3342	struct tb *tb;
3343	u8 link;
3344	u8 depth;
3345	const uuid_t *uuid;
3346	u64 route;
3347};
3348
3349static int tb_switch_match(struct device *dev, const void *data)
3350{
3351	struct tb_switch *sw = tb_to_switch(dev);
3352	const struct tb_sw_lookup *lookup = data;
3353
3354	if (!sw)
3355		return 0;
3356	if (sw->tb != lookup->tb)
3357		return 0;
3358
3359	if (lookup->uuid)
3360		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3361
3362	if (lookup->route) {
3363		return sw->config.route_lo == lower_32_bits(lookup->route) &&
3364		       sw->config.route_hi == upper_32_bits(lookup->route);
3365	}
3366
3367	/* Root switch is matched only by depth */
3368	if (!lookup->depth)
3369		return !sw->depth;
3370
3371	return sw->link == lookup->link && sw->depth == lookup->depth;
3372}
3373
3374/**
3375 * tb_switch_find_by_link_depth() - Find switch by link and depth
3376 * @tb: Domain the switch belongs
3377 * @link: Link number the switch is connected
3378 * @depth: Depth of the switch in link
3379 *
3380 * The returned switch has its reference count increased so the caller
3381 * needs to call tb_switch_put() when done with the switch.
3382 */
3383struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3384{
3385	struct tb_sw_lookup lookup;
3386	struct device *dev;
3387
3388	memset(&lookup, 0, sizeof(lookup));
3389	lookup.tb = tb;
3390	lookup.link = link;
3391	lookup.depth = depth;
3392
3393	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3394	if (dev)
3395		return tb_to_switch(dev);
3396
3397	return NULL;
3398}
3399
3400/**
3401 * tb_switch_find_by_uuid() - Find switch by UUID
3402 * @tb: Domain the switch belongs
3403 * @uuid: UUID to look for
3404 *
3405 * The returned switch has its reference count increased so the caller
3406 * needs to call tb_switch_put() when done with the switch.
3407 */
3408struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3409{
3410	struct tb_sw_lookup lookup;
3411	struct device *dev;
3412
3413	memset(&lookup, 0, sizeof(lookup));
3414	lookup.tb = tb;
3415	lookup.uuid = uuid;
3416
3417	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3418	if (dev)
3419		return tb_to_switch(dev);
3420
3421	return NULL;
3422}
3423
3424/**
3425 * tb_switch_find_by_route() - Find switch by route string
3426 * @tb: Domain the switch belongs
3427 * @route: Route string to look for
3428 *
3429 * The returned switch has its reference count increased so the caller
3430 * needs to call tb_switch_put() when done with the switch.
3431 */
3432struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3433{
3434	struct tb_sw_lookup lookup;
3435	struct device *dev;
3436
3437	if (!route)
3438		return tb_switch_get(tb->root_switch);
3439
3440	memset(&lookup, 0, sizeof(lookup));
3441	lookup.tb = tb;
3442	lookup.route = route;
3443
3444	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3445	if (dev)
3446		return tb_to_switch(dev);
3447
3448	return NULL;
3449}
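
/*
 * The lookup helpers above return a reference that the caller must
 * drop. A minimal usage sketch:
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (!sw)
 *		return -ENODEV;
 *
 *	... use sw ...
 *
 *	tb_switch_put(sw);
 */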
3450
3451/**
3452 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3453 * @sw: Switch to find the port from
3454 * @type: Port type to look for
3455 */
3456struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3457				    enum tb_port_type type)
3458{
3459	struct tb_port *port;
3460
3461	tb_switch_for_each_port(sw, port) {
3462		if (port->config.type == type)
3463			return port;
3464	}
3465
3466	return NULL;
3467}
3468
3469static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
3470{
3471	struct tb_switch *parent = tb_switch_parent(sw);
3472	struct tb_port *up, *down;
3473	int ret;
3474
3475	if (!tb_route(sw))
3476		return 0;
3477
3478	up = tb_upstream_port(sw);
3479	down = tb_port_at(tb_route(sw), parent);
3480	ret = tb_port_pm_secondary_enable(up);
3481	if (ret)
3482		return ret;
3483
3484	return tb_port_pm_secondary_disable(down);
3485}
3486
3487static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3488{
3489	struct tb_switch *parent = tb_switch_parent(sw);
3490	bool up_clx_support, down_clx_support;
3491	struct tb_port *up, *down;
3492	int ret;
3493
3494	if (!tb_switch_is_clx_supported(sw))
3495		return 0;
3496
3497	/*
3498	 * CLx for the host router's downstream port is enabled as part
3499	 * of the downstream router enabling procedure, so skip it here.
3500	 */
3501	if (!tb_route(sw))
3502		return 0;
3503
3504	/* Enable CLx only for first hop router (depth = 1) */
3505	if (tb_route(parent))
3506		return 0;
3507
3508	ret = tb_switch_pm_secondary_resolve(sw);
3509	if (ret)
3510		return ret;
3511
3512	up = tb_upstream_port(sw);
3513	down = tb_port_at(tb_route(sw), parent);
3514
3515	up_clx_support = tb_port_clx_supported(up, clx);
3516	down_clx_support = tb_port_clx_supported(down, clx);
3517
3518	tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
3519		    up_clx_support ? "" : "not ");
3520	tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
3521		    down_clx_support ? "" : "not ");
3522
3523	if (!up_clx_support || !down_clx_support)
3524		return -EOPNOTSUPP;
3525
3526	ret = tb_port_clx_enable(up, clx);
3527	if (ret)
3528		return ret;
3529
3530	ret = tb_port_clx_enable(down, clx);
3531	if (ret) {
3532		tb_port_clx_disable(up, clx);
3533		return ret;
3534	}
3535
3536	ret = tb_switch_mask_clx_objections(sw);
3537	if (ret) {
3538		tb_port_clx_disable(up, clx);
3539		tb_port_clx_disable(down, clx);
3540		return ret;
3541	}
3542
3543	sw->clx = clx;
3544
3545	tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
3546	return 0;
3547}
3548
3549/**
3550 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
3551 * @sw: Router to enable CLx for
3552 * @clx: The CLx state to enable
3553 *
3554 * Enables the CLx state only for the first hop router. That is the most
3555 * common use case, intended for better thermal management, which in turn
3556 * helps to improve performance. CLx is enabled only if both sides of the
3557 * link support CLx, the link is not configured as two single lane links
3558 * and the link is not an inter-domain link. The complete set of
3559 * conditions is described in CM Guide 1.0 section 8.1.
3560 *
3561 * Return: %0 on success or an error code on failure.
3562 */
3563int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
3564{
3565	struct tb_switch *root_sw = sw->tb->root_switch;
3566
3567	if (!clx_enabled)
3568		return 0;
3569
3570	/*
3571	 * CLx is not enabled and validated on Intel USB4 platforms before
3572	 * Alder Lake.
3573	 */
3574	if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
3575		return 0;
3576
3577	switch (clx) {
3578	case TB_CL1:
3579		/* CL0s and CL1 are enabled and supported together */
3580		return __tb_switch_enable_clx(sw, clx);
3581
3582	default:
3583		return -EOPNOTSUPP;
3584	}
3585}
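
/*
 * A hedged example of how a connection manager might use this after a
 * router has been added, tolerating routers that do not support CLx
 * (the exact policy is up to the caller):
 *
 *	ret = tb_switch_enable_clx(sw, TB_CL1);
 *	if (ret && ret != -EOPNOTSUPP)
 *		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
 *			   tb_switch_clx_name(TB_CL1));
 */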
3586
3587static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3588{
3589	struct tb_switch *parent = tb_switch_parent(sw);
3590	struct tb_port *up, *down;
3591	int ret;
3592
3593	if (!tb_switch_is_clx_supported(sw))
3594		return 0;
3595
3596	/*
3597	 * CLx for the host router's downstream port is disabled as part
3598	 * of the downstream router disabling procedure, so skip it here.
3599	 */
3600	if (!tb_route(sw))
3601		return 0;
3602
3603	/* Disable CLx only for first hop router (depth = 1) */
3604	if (tb_route(parent))
3605		return 0;
3606
3607	up = tb_upstream_port(sw);
3608	down = tb_port_at(tb_route(sw), parent);
3609	ret = tb_port_clx_disable(up, clx);
3610	if (ret)
3611		return ret;
3612
3613	ret = tb_port_clx_disable(down, clx);
3614	if (ret)
3615		return ret;
3616
3617	sw->clx = TB_CLX_DISABLE;
3618
3619	tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
3620	return 0;
3621}
3622
3623/**
3624 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
3625 * @sw: Router to disable CLx for
3626 * @clx: The CLx state to disable
3627 *
3628 * Return: %0 on success or an error code on failure.
3629 */
3630int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
3631{
3632	if (!clx_enabled)
3633		return 0;
3634
3635	switch (clx) {
3636	case TB_CL1:
3637		/* CL0s and CL1 are enabled and supported together */
3638		return __tb_switch_disable_clx(sw, clx);
3639
3640	default:
3641		return -EOPNOTSUPP;
3642	}
3643}
3644
3645/**
3646 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
3647 * @sw: Router to mask objections for
3648 *
3649 * Mask the objections coming from the second depth routers in order to
3650 * stop these objections from interfering with the CLx states of the first
3651 * depth link.
3652 */
3653int tb_switch_mask_clx_objections(struct tb_switch *sw)
3654{
3655	int up_port = sw->config.upstream_port_number;
3656	u32 offset, val[2], mask_obj, unmask_obj;
3657	int ret, i;
3658
3659	/* Among pre-USB4 devices only Titan Ridge supports CLx states */
3660	if (!tb_switch_is_titan_ridge(sw))
3661		return 0;
3662
3663	if (!tb_route(sw))
3664		return 0;
3665
3666	/*
3667	 * In Titan Ridge there are only two dual-lane Thunderbolt ports:
3668	 * port A consists of lane adapters 1,2 and port B consists of
3669	 * lane adapters 3,4.
3670	 * If the upstream port is A (lanes 1,2), we mask objections from
3671	 * port B (lanes 3,4) and unmask objections from port A, and vice versa.
3672	 */
3673	if (up_port == 1) {
3674		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3675		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3676		offset = TB_LOW_PWR_C1_CL1;
3677	} else {
3678		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
3679		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
3680		offset = TB_LOW_PWR_C3_CL1;
3681	}
3682
3683	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
3684			 sw->cap_lp + offset, ARRAY_SIZE(val));
3685	if (ret)
3686		return ret;
3687
3688	for (i = 0; i < ARRAY_SIZE(val); i++) {
3689		val[i] |= mask_obj;
3690		val[i] &= ~unmask_obj;
3691	}
3692
3693	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
3694			   sw->cap_lp + offset, ARRAY_SIZE(val));
3695}
3696
3697/*
3698 * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
3699 * device. For now it is used only for Titan Ridge.
3700 */
3701static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
3702				       unsigned int pcie_offset, u32 value)
3703{
3704	u32 offset, command, val;
3705	int ret;
3706
3707	if (sw->generation != 3)
3708		return -EOPNOTSUPP;
3709
3710	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
3711	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
3712	if (ret)
3713		return ret;
3714
3715	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
3716	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
3717	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
3718	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3719			<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
3720	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
3721
3722	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
3723
3724	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
3725	if (ret)
3726		return ret;
3727
3728	ret = tb_switch_wait_for_bit(sw, offset,
3729				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
3730	if (ret)
3731		return ret;
3732
3733	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
3734	if (ret)
3735		return ret;
3736
3737	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
3738		return -ETIMEDOUT;
3739
3740	return 0;
3741}
3742
3743/**
3744 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
3745 * @sw: Router to enable PCIe L1
3746 *
3747 * For a Titan Ridge switch to enter a CLx state, its PCIe bridges must
3748 * enable entry to the PCIe L1 state. This must be called after the
3749 * upstream PCIe tunnel has been configured. Due to an Intel platform
3750 * limitation, it must be called only for the first hop switch.
3751 */
3752int tb_switch_pcie_l1_enable(struct tb_switch *sw)
3753{
3754	struct tb_switch *parent = tb_switch_parent(sw);
3755	int ret;
3756
3757	if (!tb_route(sw))
3758		return 0;
3759
3760	if (!tb_switch_is_titan_ridge(sw))
3761		return 0;
3762
3763	/* Enable PCIe L1 enable only for first hop router (depth = 1) */
3764	if (tb_route(parent))
3765		return 0;
3766
3767	/* Write to downstream PCIe bridge #5 aka Dn4 */
3768	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
3769	if (ret)
3770		return ret;
3771
3772	/* Write to Upstream PCIe bridge #0 aka Up0 */
3773	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
3774}
3775
3776/**
3777 * tb_switch_xhci_connect() - Connect internal xHCI
3778 * @sw: Router whose xHCI to connect
3779 *
3780 * Can be called for any router. For Alpine Ridge and Titan Ridge this
3781 * performs special flows that make the xHCI functional for any device
3782 * connected to the Type-C port. Call this only after the PCIe tunnel
3783 * has been established. The function only does the connect if it is not
3784 * done already, so it can be called several times for the same router.
3785 */
3786int tb_switch_xhci_connect(struct tb_switch *sw)
3787{
3788	struct tb_port *port1, *port3;
3789	int ret;
3790
3791	if (sw->generation != 3)
3792		return 0;
3793
3794	port1 = &sw->ports[1];
3795	port3 = &sw->ports[3];
3796
3797	if (tb_switch_is_alpine_ridge(sw)) {
3798		bool usb_port1, usb_port3, xhci_port1, xhci_port3;
3799
3800		usb_port1 = tb_lc_is_usb_plugged(port1);
3801		usb_port3 = tb_lc_is_usb_plugged(port3);
3802		xhci_port1 = tb_lc_is_xhci_connected(port1);
3803		xhci_port3 = tb_lc_is_xhci_connected(port3);
3804
3805		/* Figure out correct USB port to connect */
3806		if (usb_port1 && !xhci_port1) {
3807			ret = tb_lc_xhci_connect(port1);
3808			if (ret)
3809				return ret;
3810		}
3811		if (usb_port3 && !xhci_port3)
3812			return tb_lc_xhci_connect(port3);
3813	} else if (tb_switch_is_titan_ridge(sw)) {
3814		ret = tb_lc_xhci_connect(port1);
3815		if (ret)
3816			return ret;
3817		return tb_lc_xhci_connect(port3);
3818	}
3819
3820	return 0;
3821}
3822
3823/**
3824 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
3825 * @sw: Router whose xHCI to disconnect
3826 *
3827 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
3828 * ports.
3829 */
3830void tb_switch_xhci_disconnect(struct tb_switch *sw)
3831{
3832	if (sw->generation == 3) {
3833		struct tb_port *port1 = &sw->ports[1];
3834		struct tb_port *port3 = &sw->ports[3];
3835
3836		tb_lc_xhci_disconnect(port1);
3837		tb_port_dbg(port1, "disconnected xHCI\n");
3838		tb_lc_xhci_disconnect(port3);
3839		tb_port_dbg(port3, "disconnected xHCI\n");
3840	}
3841}