   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt Cactus Ridge driver - switch/port utility functions
   4 *
   5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
   6 */
   7
   8#include <linux/delay.h>
   9#include <linux/idr.h>
  10#include <linux/nvmem-provider.h>
  11#include <linux/sizes.h>
  12#include <linux/slab.h>
  13#include <linux/vmalloc.h>
  14
  15#include "tb.h"
  16
  17/* Switch authorization from userspace is serialized by this lock */
  18static DEFINE_MUTEX(switch_lock);
  19
  20/* Switch NVM support */
  21
  22#define NVM_DEVID		0x05
  23#define NVM_VERSION		0x08
  24#define NVM_CSS			0x10
  25#define NVM_FLASH_SIZE		0x45
  26
  27#define NVM_MIN_SIZE		SZ_32K
  28#define NVM_MAX_SIZE		SZ_512K
  29
  30static DEFINE_IDA(nvm_ida);
  31
  32struct nvm_auth_status {
  33	struct list_head list;
  34	uuid_t uuid;
  35	u32 status;
  36};
  37
  38/*
  39 * Hold NVM authentication failure status per switch. This information
  40 * needs to stay around even when the switch gets power cycled so we
  41 * keep it separately.
  42 */
  43static LIST_HEAD(nvm_auth_status_cache);
  44static DEFINE_MUTEX(nvm_auth_status_lock);
  45
  46static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
  47{
  48	struct nvm_auth_status *st;
  49
  50	list_for_each_entry(st, &nvm_auth_status_cache, list) {
  51		if (uuid_equal(&st->uuid, sw->uuid))
  52			return st;
  53	}
  54
  55	return NULL;
  56}
  57
  58static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
  59{
  60	struct nvm_auth_status *st;
  61
  62	mutex_lock(&nvm_auth_status_lock);
  63	st = __nvm_get_auth_status(sw);
  64	mutex_unlock(&nvm_auth_status_lock);
  65
  66	*status = st ? st->status : 0;
  67}
  68
  69static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
  70{
  71	struct nvm_auth_status *st;
  72
  73	if (WARN_ON(!sw->uuid))
  74		return;
  75
  76	mutex_lock(&nvm_auth_status_lock);
  77	st = __nvm_get_auth_status(sw);
  78
  79	if (!st) {
  80		st = kzalloc(sizeof(*st), GFP_KERNEL);
  81		if (!st)
  82			goto unlock;
  83
  84		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
  85		INIT_LIST_HEAD(&st->list);
  86		list_add_tail(&st->list, &nvm_auth_status_cache);
  87	}
  88
  89	st->status = status;
  90unlock:
  91	mutex_unlock(&nvm_auth_status_lock);
  92}
  93
  94static void nvm_clear_auth_status(const struct tb_switch *sw)
  95{
  96	struct nvm_auth_status *st;
  97
  98	mutex_lock(&nvm_auth_status_lock);
  99	st = __nvm_get_auth_status(sw);
 100	if (st) {
 101		list_del(&st->list);
 102		kfree(st);
 103	}
 104	mutex_unlock(&nvm_auth_status_lock);
 105}
 106
 107static int nvm_validate_and_write(struct tb_switch *sw)
 108{
 109	unsigned int image_size, hdr_size;
 110	const u8 *buf = sw->nvm->buf;
 111	u16 ds_size;
 112	int ret;
 113
 114	if (!buf)
 115		return -EINVAL;
 116
 117	image_size = sw->nvm->buf_data_size;
 118	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
 119		return -EINVAL;
 120
 121	/*
 122	 * FARB pointer must point inside the image and must at least
 123	 * contain parts of the digital section we will be reading here.
 124	 */
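    	/* The FARB pointer (digital section offset) is the low 24 bits of the first dword */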
 125	hdr_size = (*(u32 *)buf) & 0xffffff;
 126	if (hdr_size + NVM_DEVID + 2 >= image_size)
 127		return -EINVAL;
 128
 129	/* Digital section start should be aligned to 4k page */
 130	if (!IS_ALIGNED(hdr_size, SZ_4K))
 131		return -EINVAL;
 132
 133	/*
 134	 * Read digital section size and check that it also fits inside
 135	 * the image.
 136	 */
 137	ds_size = *(u16 *)(buf + hdr_size);
 138	if (ds_size >= image_size)
 139		return -EINVAL;
 140
 141	if (!sw->safe_mode) {
 142		u16 device_id;
 143
 144		/*
 145		 * Make sure the device ID in the image matches the one
 146		 * we read from the switch config space.
 147		 */
 148		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
 149		if (device_id != sw->config.device_id)
 150			return -EINVAL;
 151
 152		if (sw->generation < 3) {
 153			/* Write CSS headers first */
 154			ret = dma_port_flash_write(sw->dma_port,
 155				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
 156				DMA_PORT_CSS_MAX_SIZE);
 157			if (ret)
 158				return ret;
 159		}
 160
 161		/* Skip headers in the image */
 162		buf += hdr_size;
 163		image_size -= hdr_size;
 164	}
 165
 166	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
 167}
 168
 169static int nvm_authenticate_host(struct tb_switch *sw)
 170{
 171	int ret;
 172
 173	/*
 174	 * Root switch NVM upgrade requires that we disconnect the
 175	 * existing paths first (in case it is not in safe mode
 176	 * already).
 177	 */
 178	if (!sw->safe_mode) {
 179		ret = tb_domain_disconnect_all_paths(sw->tb);
 180		if (ret)
 181			return ret;
 182		/*
 183		 * The host controller goes away pretty soon after this if
 184		 * everything goes well so getting a timeout is expected.
 185		 */
 186		ret = dma_port_flash_update_auth(sw->dma_port);
 187		return ret == -ETIMEDOUT ? 0 : ret;
 188	}
 189
 190	/*
 191	 * From safe mode we can get out by just power cycling the
 192	 * switch.
 193	 */
 194	dma_port_power_cycle(sw->dma_port);
 195	return 0;
 196}
 197
 198static int nvm_authenticate_device(struct tb_switch *sw)
 199{
 200	int ret, retries = 10;
 201
 202	ret = dma_port_flash_update_auth(sw->dma_port);
 203	if (ret && ret != -ETIMEDOUT)
 204		return ret;
 205
 206	/*
 207	 * Poll here for the authentication status. It takes some time
 208	 * for the device to respond (we get a timeout for a while). Once
 209	 * we get a response the device needs to be power cycled in order
 210	 * for the new NVM to be taken into use.
 211	 */
 212	do {
 213		u32 status;
 214
 215		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
 216		if (ret < 0 && ret != -ETIMEDOUT)
 217			return ret;
 218		if (ret > 0) {
 219			if (status) {
 220				tb_sw_warn(sw, "failed to authenticate NVM\n");
 221				nvm_set_auth_status(sw, status);
 222			}
 223
 224			tb_sw_info(sw, "power cycling the switch now\n");
 225			dma_port_power_cycle(sw->dma_port);
 226			return 0;
 227		}
 228
 229		msleep(500);
 230	} while (--retries);
 231
 232	return -ETIMEDOUT;
 233}
 234
 235static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
 236			      size_t bytes)
 237{
 238	struct tb_switch *sw = priv;
 239
 240	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
 241}
 242
 243static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
 244			       size_t bytes)
 245{
 246	struct tb_switch *sw = priv;
 247	int ret = 0;
 248
 249	if (mutex_lock_interruptible(&switch_lock))
 250		return -ERESTARTSYS;
 251
 252	/*
 253	 * Since writing the NVM image might require some special steps,
 254	 * for example when CSS headers are written, we cache the image
 255	 * locally here and handle the special cases when the user asks
 256	 * us to authenticate the image.
 257	 */
 258	if (!sw->nvm->buf) {
 259		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
 260		if (!sw->nvm->buf) {
 261			ret = -ENOMEM;
 262			goto unlock;
 263		}
 264	}
 265
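    	/*
    	 * Writes are assumed to arrive in order, so offset + bytes below
    	 * tracks the total size of the image written so far.
    	 */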
 266	sw->nvm->buf_data_size = offset + bytes;
 267	memcpy(sw->nvm->buf + offset, val, bytes);
 268
 269unlock:
 270	mutex_unlock(&switch_lock);
 271
 272	return ret;
 273}
 274
 275static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
 276					   size_t size, bool active)
 277{
 278	struct nvmem_config config;
 279
 280	memset(&config, 0, sizeof(config));
 281
 282	if (active) {
 283		config.name = "nvm_active";
 284		config.reg_read = tb_switch_nvm_read;
 285		config.read_only = true;
 286	} else {
 287		config.name = "nvm_non_active";
 288		config.reg_write = tb_switch_nvm_write;
 289		config.root_only = true;
 290	}
 291
 292	config.id = id;
 293	config.stride = 4;
 294	config.word_size = 4;
 295	config.size = size;
 296	config.dev = &sw->dev;
 297	config.owner = THIS_MODULE;
 298	config.priv = sw;
 299
 300	return nvmem_register(&config);
 301}
 302
 303static int tb_switch_nvm_add(struct tb_switch *sw)
 304{
 305	struct nvmem_device *nvm_dev;
 306	struct tb_switch_nvm *nvm;
 307	u32 val;
 308	int ret;
 309
 310	if (!sw->dma_port)
 311		return 0;
 312
 313	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
 314	if (!nvm)
 315		return -ENOMEM;
 316
 317	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
 318
 319	/*
 320	 * If the switch is in safe mode, the only accessible portion of
 321	 * the NVM is the non-active one, where userspace is expected to
 322	 * write a new functional NVM image.
 323	 */
 324	if (!sw->safe_mode) {
 325		u32 nvm_size, hdr_size;
 326
 327		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
 328					  sizeof(val));
 329		if (ret)
 330			goto err_ida;
 331
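    		/*
    		 * The low three bits of the flash size register give the
    		 * total flash size as a power of two; half of what remains
    		 * after the header region holds the active image (per the
    		 * arithmetic below).
    		 */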
 332		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
 333		nvm_size = (SZ_1M << (val & 7)) / 8;
 334		nvm_size = (nvm_size - hdr_size) / 2;
 335
 336		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
 337					  sizeof(val));
 338		if (ret)
 339			goto err_ida;
 340
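    		/*
    		 * Major and minor versions are packed into consecutive bytes
    		 * of the NVM_VERSION dword (see the shifts below).
    		 */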
 341		nvm->major = val >> 16;
 342		nvm->minor = val >> 8;
 343
 344		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
 345		if (IS_ERR(nvm_dev)) {
 346			ret = PTR_ERR(nvm_dev);
 347			goto err_ida;
 348		}
 349		nvm->active = nvm_dev;
 350	}
 351
 352	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
 353	if (IS_ERR(nvm_dev)) {
 354		ret = PTR_ERR(nvm_dev);
 355		goto err_nvm_active;
 356	}
 357	nvm->non_active = nvm_dev;
 358
 359	mutex_lock(&switch_lock);
 360	sw->nvm = nvm;
 361	mutex_unlock(&switch_lock);
 362
 363	return 0;
 364
 365err_nvm_active:
 366	if (nvm->active)
 367		nvmem_unregister(nvm->active);
 368err_ida:
 369	ida_simple_remove(&nvm_ida, nvm->id);
 370	kfree(nvm);
 371
 372	return ret;
 373}
 374
 375static void tb_switch_nvm_remove(struct tb_switch *sw)
 376{
 377	struct tb_switch_nvm *nvm;
 378
 379	mutex_lock(&switch_lock);
 380	nvm = sw->nvm;
 381	sw->nvm = NULL;
 382	mutex_unlock(&switch_lock);
 383
 384	if (!nvm)
 385		return;
 386
 387	/* Remove authentication status in case the switch is unplugged */
 388	if (!nvm->authenticating)
 389		nvm_clear_auth_status(sw);
 390
 391	nvmem_unregister(nvm->non_active);
 392	if (nvm->active)
 393		nvmem_unregister(nvm->active);
 394	ida_simple_remove(&nvm_ida, nvm->id);
 395	vfree(nvm->buf);
 396	kfree(nvm);
 397}
 398
 399/* port utility functions */
 400
 401static const char *tb_port_type(struct tb_regs_port_header *port)
 402{
 403	switch (port->type >> 16) {
 404	case 0:
 405		switch ((u8) port->type) {
 406		case 0:
 407			return "Inactive";
 408		case 1:
 409			return "Port";
 410		case 2:
 411			return "NHI";
 412		default:
 413			return "unknown";
 414		}
 415	case 0x2:
 416		return "Ethernet";
 417	case 0x8:
 418		return "SATA";
 419	case 0xe:
 420		return "DP/HDMI";
 421	case 0x10:
 422		return "PCIe";
 423	case 0x20:
 424		return "USB";
 425	default:
 426		return "unknown";
 427	}
 428}
 429
 430static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
 431{
 432	tb_info(tb,
 433		" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
 434		port->port_number, port->vendor_id, port->device_id,
 435		port->revision, port->thunderbolt_version, tb_port_type(port),
 436		port->type);
 437	tb_info(tb, "  Max hop id (in/out): %d/%d\n",
 438		port->max_in_hop_id, port->max_out_hop_id);
 439	tb_info(tb, "  Max counters: %d\n", port->max_counters);
 440	tb_info(tb, "  NFC Credits: %#x\n", port->nfc_credits);
 441}
 442
 443/**
 444 * tb_port_state() - get connectedness state of a port
 445 *
 446 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 447 *
 448 * Return: Returns an enum tb_port_state on success or an error code on failure.
 449 */
 450static int tb_port_state(struct tb_port *port)
 451{
 452	struct tb_cap_phy phy;
 453	int res;
 454	if (port->cap_phy == 0) {
 455		tb_port_WARN(port, "does not have a PHY\n");
 456		return -EINVAL;
 457	}
 458	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
 459	if (res)
 460		return res;
 461	return phy.state;
 462}
 463
 464/**
 465 * tb_wait_for_port() - wait for a port to become ready
 466 *
 467 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 468 * wait_if_unplugged is set then we also wait if the port is in state
 469 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 470 * switch resume). Otherwise we only wait if a device is registered but the link
 471 * has not yet been established.
 472 *
 473 * Return: Returns an error code on failure. Returns 0 if the port is not
 474 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 475 * if the port is connected and in state TB_PORT_UP.
 476 */
 477int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
 478{
 479	int retries = 10;
 480	int state;
 481	if (!port->cap_phy) {
 482		tb_port_WARN(port, "does not have PHY\n");
 483		return -EINVAL;
 484	}
 485	if (tb_is_upstream_port(port)) {
 486		tb_port_WARN(port, "is the upstream port\n");
 487		return -EINVAL;
 488	}
 489
 490	while (retries--) {
 491		state = tb_port_state(port);
 492		if (state < 0)
 493			return state;
 494		if (state == TB_PORT_DISABLED) {
 495			tb_port_info(port, "is disabled (state: 0)\n");
 496			return 0;
 497		}
 498		if (state == TB_PORT_UNPLUGGED) {
 499			if (wait_if_unplugged) {
 500				/* used during resume */
 501				tb_port_info(port,
 502					     "is unplugged (state: 7), retrying...\n");
 503				msleep(100);
 504				continue;
 505			}
 506			tb_port_info(port, "is unplugged (state: 7)\n");
 507			return 0;
 508		}
 509		if (state == TB_PORT_UP) {
 510			tb_port_info(port,
 511				     "is connected, link is up (state: 2)\n");
 512			return 1;
 513		}
 514
 515		/*
 516		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
 517		 * time.
 518		 */
 519		tb_port_info(port,
 520			     "is connected, link is not up (state: %d), retrying...\n",
 521			     state);
 522		msleep(100);
 523	}
 524	tb_port_warn(port,
 525		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
 526	return 0;
 527}
 528
 529/**
 530 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 531 *
 532 * Change the number of NFC credits allocated to @port by @credits. To remove
 533 * NFC credits pass a negative amount of credits.
 534 *
 535 * Return: Returns 0 on success or an error code on failure.
 536 */
 537int tb_port_add_nfc_credits(struct tb_port *port, int credits)
 538{
 539	if (credits == 0)
 540		return 0;
 541	tb_port_info(port,
 542		     "adding %#x NFC credits (%#x -> %#x)",
 543		     credits,
 544		     port->config.nfc_credits,
 545		     port->config.nfc_credits + credits);
 546	port->config.nfc_credits += credits;
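    	/* nfc_credits is dword 4 of the port configuration space */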
 547	return tb_port_write(port, &port->config.nfc_credits,
 548			     TB_CFG_PORT, 4, 1);
 549}
 550
 551/**
 552 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 553 *
 554 * Return: Returns 0 on success or an error code on failure.
 555 */
 556int tb_port_clear_counter(struct tb_port *port, int counter)
 557{
 558	u32 zero[3] = { 0, 0, 0 };
 559	tb_port_info(port, "clearing counter %d\n", counter);
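    	/* Each counter occupies three dwords in the TB_CFG_COUNTERS space */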
 560	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
 561}
 562
 563/**
 564 * tb_init_port() - initialize a port
 565 *
 566 * This is a helper method for tb_switch_alloc. Does not check or initialize
 567 * any downstream switches.
 568 *
 569 * Return: Returns 0 on success or an error code on failure.
 570 */
 571static int tb_init_port(struct tb_port *port)
 572{
 573	int res;
 574	int cap;
 575
 576	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
 577	if (res)
 578		return res;
 579
 580	/* Port 0 is the switch itself and has no PHY. */
 581	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
 582		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
 583
 584		if (cap > 0)
 585			port->cap_phy = cap;
 586		else
 587			tb_port_WARN(port, "non switch port without a PHY\n");
 588	}
 589
 590	tb_dump_port(port->sw->tb, &port->config);
 591
 592	/* TODO: Read dual link port, DP port and more from EEPROM. */
 593	return 0;
 594
 595}
 596
 597/* switch utility functions */
 598
 599static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
 600{
 601	tb_info(tb,
 602		" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
 603		sw->vendor_id, sw->device_id, sw->revision,
 604		sw->thunderbolt_version);
 605	tb_info(tb, "  Max Port Number: %d\n", sw->max_port_number);
 606	tb_info(tb, "  Config:\n");
 607	tb_info(tb,
 608		"   Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
 609		sw->upstream_port_number, sw->depth,
 610		(((u64) sw->route_hi) << 32) | sw->route_lo,
 611		sw->enabled, sw->plug_events_delay);
 612	tb_info(tb,
 613		"   unknown1: %#x unknown4: %#x\n",
 614		sw->__unknown1, sw->__unknown4);
 615}
 616
 617/**
 618 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 619 *
 620 * Return: Returns 0 on success or an error code on failure.
 621 */
 622int tb_switch_reset(struct tb *tb, u64 route)
 623{
 624	struct tb_cfg_result res;
 625	struct tb_regs_switch_header header = {
 626		header.route_hi = route >> 32,
 627		header.route_lo = route,
 628		header.enabled = true,
 629	};
 630	tb_info(tb, "resetting switch at %llx\n", route);
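    	/*
    	 * Write only the route string and enabled bit of the header (the
    	 * two dwords at offset 2) before issuing the reset.
    	 */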
 631	res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
 632			0, 2, 2, 2);
 633	if (res.err)
 634		return res.err;
 635	res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
 636	if (res.err > 0)
 637		return -EIO;
 638	return res.err;
 639}
 640
 641struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
 642{
 643	u8 next_port = route; /*
 644			       * Routes use a stride of 8 bits,
 645			       * even though a port index has 6 bits at most.
 646			       */
 647	if (route == 0)
 648		return sw;
 649	if (next_port > sw->config.max_port_number)
 650		return NULL;
 651	if (tb_is_upstream_port(&sw->ports[next_port]))
 652		return NULL;
 653	if (!sw->ports[next_port].remote)
 654		return NULL;
 655	return get_switch_at_route(sw->ports[next_port].remote->sw,
 656				   route >> TB_ROUTE_SHIFT);
 657}
 658
 659/**
 660 * tb_plug_events_active() - enable/disable plug events on a switch
 661 *
 662 * Also configures a sane plug_events_delay of 255ms.
 663 *
 664 * Return: Returns 0 on success or an error code on failure.
 665 */
 666static int tb_plug_events_active(struct tb_switch *sw, bool active)
 667{
 668	u32 data;
 669	int res;
 670
 671	if (!sw->config.enabled)
 672		return 0;
 673
 674	sw->config.plug_events_delay = 0xff;
 675	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
 676	if (res)
 677		return res;
 678
 679	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
 680	if (res)
 681		return res;
 682
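    	/*
    	 * The masks below toggle bits 2-6 of the plug events register:
    	 * they are cleared (with bit 2 set on newer controllers) when
    	 * enabling events and all set when disabling. The meaning of the
    	 * individual bits is an assumption based on this code only.
    	 */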
 683	if (active) {
 684		data = data & 0xFFFFFF83;
 685		switch (sw->config.device_id) {
 686		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
 687		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
 688		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
 689			break;
 690		default:
 691			data |= 4;
 692		}
 693	} else {
 694		data = data | 0x7c;
 695	}
 696	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
 697			   sw->cap_plug_events + 1, 1);
 698}
 699
 700static ssize_t authorized_show(struct device *dev,
 701			       struct device_attribute *attr,
 702			       char *buf)
 703{
 704	struct tb_switch *sw = tb_to_switch(dev);
 705
 706	return sprintf(buf, "%u\n", sw->authorized);
 707}
 708
 709static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
 710{
 711	int ret = -EINVAL;
 712
 713	if (mutex_lock_interruptible(&switch_lock))
 714		return -ERESTARTSYS;
 715
 716	if (sw->authorized)
 717		goto unlock;
 718
 719	/*
 720	 * Make sure there is no PCIe rescan ongoing when a new PCIe
 721	 * tunnel is created. Otherwise the PCIe rescan code might find
 722	 * the new tunnel too early.
 723	 */
 724	pci_lock_rescan_remove();
 725
 726	switch (val) {
 727	/* Approve switch */
 728	case 1:
 729		if (sw->key)
 730			ret = tb_domain_approve_switch_key(sw->tb, sw);
 731		else
 732			ret = tb_domain_approve_switch(sw->tb, sw);
 733		break;
 734
 735	/* Challenge switch */
 736	case 2:
 737		if (sw->key)
 738			ret = tb_domain_challenge_switch_key(sw->tb, sw);
 739		break;
 740
 741	default:
 742		break;
 743	}
 744
 745	pci_unlock_rescan_remove();
 746
 747	if (!ret) {
 748		sw->authorized = val;
 749		/* Notify status change to the userspace */
 750		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
 751	}
 752
 753unlock:
 754	mutex_unlock(&switch_lock);
 755	return ret;
 756}
 757
 758static ssize_t authorized_store(struct device *dev,
 759				struct device_attribute *attr,
 760				const char *buf, size_t count)
 761{
 762	struct tb_switch *sw = tb_to_switch(dev);
 763	unsigned int val;
 764	ssize_t ret;
 765
 766	ret = kstrtouint(buf, 0, &val);
 767	if (ret)
 768		return ret;
 769	if (val > 2)
 770		return -EINVAL;
 771
 772	ret = tb_switch_set_authorized(sw, val);
 773
 774	return ret ? ret : count;
 775}
 776static DEVICE_ATTR_RW(authorized);
 777
 778static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
 779			 char *buf)
 780{
 781	struct tb_switch *sw = tb_to_switch(dev);
 782
 783	return sprintf(buf, "%u\n", sw->boot);
 784}
 785static DEVICE_ATTR_RO(boot);
 786
 787static ssize_t device_show(struct device *dev, struct device_attribute *attr,
 788			   char *buf)
 789{
 790	struct tb_switch *sw = tb_to_switch(dev);
 791
 792	return sprintf(buf, "%#x\n", sw->device);
 793}
 794static DEVICE_ATTR_RO(device);
 795
 796static ssize_t
 797device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
 798{
 799	struct tb_switch *sw = tb_to_switch(dev);
 800
 801	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
 802}
 803static DEVICE_ATTR_RO(device_name);
 804
 805static ssize_t key_show(struct device *dev, struct device_attribute *attr,
 806			char *buf)
 807{
 808	struct tb_switch *sw = tb_to_switch(dev);
 809	ssize_t ret;
 810
 811	if (mutex_lock_interruptible(&switch_lock))
 812		return -ERESTARTSYS;
 813
 814	if (sw->key)
 815		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
 816	else
 817		ret = sprintf(buf, "\n");
 818
 819	mutex_unlock(&switch_lock);
 820	return ret;
 821}
 822
 823static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 824			 const char *buf, size_t count)
 825{
 826	struct tb_switch *sw = tb_to_switch(dev);
 827	u8 key[TB_SWITCH_KEY_SIZE];
 828	ssize_t ret = count;
 829	bool clear = false;
 830
 831	if (!strcmp(buf, "\n"))
 832		clear = true;
 833	else if (hex2bin(key, buf, sizeof(key)))
 834		return -EINVAL;
 835
 836	if (mutex_lock_interruptible(&switch_lock))
 837		return -ERESTARTSYS;
 838
 839	if (sw->authorized) {
 840		ret = -EBUSY;
 841	} else {
 842		kfree(sw->key);
 843		if (clear) {
 844			sw->key = NULL;
 845		} else {
 846			sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
 847			if (!sw->key)
 848				ret = -ENOMEM;
 849		}
 850	}
 851
 852	mutex_unlock(&switch_lock);
 853	return ret;
 854}
 855static DEVICE_ATTR(key, 0600, key_show, key_store);
 856
 857static ssize_t nvm_authenticate_show(struct device *dev,
 858	struct device_attribute *attr, char *buf)
 859{
 860	struct tb_switch *sw = tb_to_switch(dev);
 861	u32 status;
 862
 863	nvm_get_auth_status(sw, &status);
 864	return sprintf(buf, "%#x\n", status);
 865}
 866
 867static ssize_t nvm_authenticate_store(struct device *dev,
 868	struct device_attribute *attr, const char *buf, size_t count)
 869{
 870	struct tb_switch *sw = tb_to_switch(dev);
 871	bool val;
 872	int ret;
 873
 874	if (mutex_lock_interruptible(&switch_lock))
 875		return -ERESTARTSYS;
 876
 877	/* If NVMem devices are not yet added */
 878	if (!sw->nvm) {
 879		ret = -EAGAIN;
 880		goto exit_unlock;
 881	}
 882
 883	ret = kstrtobool(buf, &val);
 884	if (ret)
 885		goto exit_unlock;
 886
 887	/* Always clear the authentication status */
 888	nvm_clear_auth_status(sw);
 889
 890	if (val) {
 891		ret = nvm_validate_and_write(sw);
 892		if (ret)
 893			goto exit_unlock;
 894
 895		sw->nvm->authenticating = true;
 896
 897		if (!tb_route(sw))
 898			ret = nvm_authenticate_host(sw);
 899		else
 900			ret = nvm_authenticate_device(sw);
 901	}
 902
 903exit_unlock:
 904	mutex_unlock(&switch_lock);
 905
 906	if (ret)
 907		return ret;
 908	return count;
 909}
 910static DEVICE_ATTR_RW(nvm_authenticate);
 911
 912static ssize_t nvm_version_show(struct device *dev,
 913				struct device_attribute *attr, char *buf)
 914{
 915	struct tb_switch *sw = tb_to_switch(dev);
 916	int ret;
 917
 918	if (mutex_lock_interruptible(&switch_lock))
 919		return -ERESTARTSYS;
 920
 921	if (sw->safe_mode)
 922		ret = -ENODATA;
 923	else if (!sw->nvm)
 924		ret = -EAGAIN;
 925	else
 926		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
 927
 928	mutex_unlock(&switch_lock);
 929
 930	return ret;
 931}
 932static DEVICE_ATTR_RO(nvm_version);
 933
 934static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
 935			   char *buf)
 936{
 937	struct tb_switch *sw = tb_to_switch(dev);
 938
 939	return sprintf(buf, "%#x\n", sw->vendor);
 940}
 941static DEVICE_ATTR_RO(vendor);
 942
 943static ssize_t
 944vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
 945{
 946	struct tb_switch *sw = tb_to_switch(dev);
 947
 948	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
 949}
 950static DEVICE_ATTR_RO(vendor_name);
 951
 952static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
 953			      char *buf)
 954{
 955	struct tb_switch *sw = tb_to_switch(dev);
 956
 957	return sprintf(buf, "%pUb\n", sw->uuid);
 958}
 959static DEVICE_ATTR_RO(unique_id);
 960
 961static struct attribute *switch_attrs[] = {
 962	&dev_attr_authorized.attr,
 963	&dev_attr_boot.attr,
 964	&dev_attr_device.attr,
 965	&dev_attr_device_name.attr,
 966	&dev_attr_key.attr,
 967	&dev_attr_nvm_authenticate.attr,
 968	&dev_attr_nvm_version.attr,
 969	&dev_attr_vendor.attr,
 970	&dev_attr_vendor_name.attr,
 971	&dev_attr_unique_id.attr,
 972	NULL,
 973};
 974
 975static umode_t switch_attr_is_visible(struct kobject *kobj,
 976				      struct attribute *attr, int n)
 977{
 978	struct device *dev = container_of(kobj, struct device, kobj);
 979	struct tb_switch *sw = tb_to_switch(dev);
 980
 981	if (attr == &dev_attr_key.attr) {
 982		if (tb_route(sw) &&
 983		    sw->tb->security_level == TB_SECURITY_SECURE &&
 984		    sw->security_level == TB_SECURITY_SECURE)
 985			return attr->mode;
 986		return 0;
 987	} else if (attr == &dev_attr_nvm_authenticate.attr ||
 988		   attr == &dev_attr_nvm_version.attr) {
 989		if (sw->dma_port)
 990			return attr->mode;
 991		return 0;
 992	} else if (attr == &dev_attr_boot.attr) {
 993		if (tb_route(sw))
 994			return attr->mode;
 995		return 0;
 996	}
 997
 998	return sw->safe_mode ? 0 : attr->mode;
 999}
1000
1001static struct attribute_group switch_group = {
1002	.is_visible = switch_attr_is_visible,
1003	.attrs = switch_attrs,
1004};
1005
1006static const struct attribute_group *switch_groups[] = {
1007	&switch_group,
1008	NULL,
1009};
1010
1011static void tb_switch_release(struct device *dev)
1012{
1013	struct tb_switch *sw = tb_to_switch(dev);
1014
1015	dma_port_free(sw->dma_port);
1016
1017	kfree(sw->uuid);
1018	kfree(sw->device_name);
1019	kfree(sw->vendor_name);
1020	kfree(sw->ports);
1021	kfree(sw->drom);
1022	kfree(sw->key);
1023	kfree(sw);
1024}
1025
1026struct device_type tb_switch_type = {
1027	.name = "thunderbolt_device",
1028	.release = tb_switch_release,
1029};
1030
1031static int tb_switch_get_generation(struct tb_switch *sw)
1032{
1033	switch (sw->config.device_id) {
1034	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1035	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1036	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1037	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1038	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1039	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1040	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1041	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1042		return 1;
1043
1044	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1045	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1046	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1047		return 2;
1048
1049	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1050	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1051	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1052	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1053	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1054	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1055	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1056	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1057		return 3;
1058
1059	default:
1060		/*
1061		 * For unknown switches assume generation to be 1 to be
1062		 * on the safe side.
1063		 */
1064		tb_sw_warn(sw, "unsupported switch device id %#x\n",
1065			   sw->config.device_id);
1066		return 1;
1067	}
1068}
1069
1070/**
1071 * tb_switch_alloc() - allocate a switch
1072 * @tb: Pointer to the owning domain
1073 * @parent: Parent device for this switch
1074 * @route: Route string for this switch
1075 *
1076 * Allocates and initializes a switch. Will not upload configuration to
1077 * the switch. For that you need to call tb_switch_configure()
1078 * separately. The returned switch should be released by calling
1079 * tb_switch_put().
1080 *
1081 * Return: Pointer to the allocated switch or %NULL in case of failure
1082 */
1083struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1084				  u64 route)
1085{
1086	int i;
1087	int cap;
1088	struct tb_switch *sw;
1089	int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1090	if (upstream_port < 0)
1091		return NULL;
1092
1093	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1094	if (!sw)
1095		return NULL;
1096
1097	sw->tb = tb;
1098	if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
1099		goto err_free_sw_ports;
1100
1101	tb_info(tb, "current switch config:\n");
1102	tb_dump_switch(tb, &sw->config);
1103
1104	/* configure switch */
1105	sw->config.upstream_port_number = upstream_port;
1106	sw->config.depth = tb_route_length(route);
1107	sw->config.route_lo = route;
1108	sw->config.route_hi = route >> 32;
1109	sw->config.enabled = 0;
1110
1111	/* initialize ports */
1112	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1113				GFP_KERNEL);
1114	if (!sw->ports)
1115		goto err_free_sw_ports;
1116
1117	for (i = 0; i <= sw->config.max_port_number; i++) {
1118		/* minimum setup for tb_find_cap and tb_drom_read to work */
1119		sw->ports[i].sw = sw;
1120		sw->ports[i].port = i;
1121	}
1122
1123	sw->generation = tb_switch_get_generation(sw);
1124
1125	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1126	if (cap < 0) {
1127		tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
1128		goto err_free_sw_ports;
1129	}
1130	sw->cap_plug_events = cap;
1131
1132	/* Root switch is always authorized */
1133	if (!route)
1134		sw->authorized = true;
1135
1136	device_initialize(&sw->dev);
1137	sw->dev.parent = parent;
1138	sw->dev.bus = &tb_bus_type;
1139	sw->dev.type = &tb_switch_type;
1140	sw->dev.groups = switch_groups;
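    	/* Name is the domain index followed by the route string, e.g. "0-1" */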
1141	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1142
1143	return sw;
1144
1145err_free_sw_ports:
1146	kfree(sw->ports);
1147	kfree(sw);
1148
1149	return NULL;
1150}
1151
1152/**
1153 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
1154 * @tb: Pointer to the owning domain
1155 * @parent: Parent device for this switch
1156 * @route: Route string for this switch
1157 *
 1158 * This creates a switch in safe mode, meaning the switch lacks all
 1159 * capabilities except the DMA configuration port until it has been
 1160 * flashed with valid NVM firmware.
1161 *
1162 * The returned switch must be released by calling tb_switch_put().
1163 *
1164 * Return: Pointer to the allocated switch or %NULL in case of failure
1165 */
1166struct tb_switch *
1167tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1168{
1169	struct tb_switch *sw;
1170
1171	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1172	if (!sw)
1173		return NULL;
1174
1175	sw->tb = tb;
1176	sw->config.depth = tb_route_length(route);
1177	sw->config.route_hi = upper_32_bits(route);
1178	sw->config.route_lo = lower_32_bits(route);
1179	sw->safe_mode = true;
1180
1181	device_initialize(&sw->dev);
1182	sw->dev.parent = parent;
1183	sw->dev.bus = &tb_bus_type;
1184	sw->dev.type = &tb_switch_type;
1185	sw->dev.groups = switch_groups;
1186	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1187
1188	return sw;
1189}
1190
1191/**
1192 * tb_switch_configure() - Uploads configuration to the switch
1193 * @sw: Switch to configure
1194 *
1195 * Call this function before the switch is added to the system. It will
 1196 * upload configuration to the switch and make it available for the
1197 * connection manager to use.
1198 *
1199 * Return: %0 in case of success and negative errno in case of failure
1200 */
1201int tb_switch_configure(struct tb_switch *sw)
1202{
1203	struct tb *tb = sw->tb;
1204	u64 route;
1205	int ret;
1206
1207	route = tb_route(sw);
1208	tb_info(tb,
1209		"initializing Switch at %#llx (depth: %d, up port: %d)\n",
1210		route, tb_route_length(route), sw->config.upstream_port_number);
1211
1212	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
1213		tb_sw_warn(sw, "unknown switch vendor id %#x\n",
1214			   sw->config.vendor_id);
1215
1216	sw->config.enabled = 1;
1217
1218	/* upload configuration */
1219	ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3);
1220	if (ret)
1221		return ret;
1222
1223	return tb_plug_events_active(sw, true);
1224}
1225
1226static void tb_switch_set_uuid(struct tb_switch *sw)
1227{
1228	u32 uuid[4];
1229	int cap;
1230
1231	if (sw->uuid)
1232		return;
1233
1234	/*
 1235	 * The newer controllers include a fused UUID as part of the link
 1236	 * controller specific registers.
1237	 */
1238	cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1239	if (cap > 0) {
1240		tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
1241	} else {
1242		/*
1243		 * ICM generates UUID based on UID and fills the upper
1244		 * two words with ones. This is not strictly following
1245		 * UUID format but we want to be compatible with it so
1246		 * we do the same here.
1247		 */
1248		uuid[0] = sw->uid & 0xffffffff;
1249		uuid[1] = (sw->uid >> 32) & 0xffffffff;
1250		uuid[2] = 0xffffffff;
1251		uuid[3] = 0xffffffff;
1252	}
1253
1254	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1255}
1256
1257static int tb_switch_add_dma_port(struct tb_switch *sw)
1258{
1259	u32 status;
1260	int ret;
1261
1262	switch (sw->generation) {
1263	case 3:
1264		break;
1265
1266	case 2:
1267		/* Only root switch can be upgraded */
1268		if (tb_route(sw))
1269			return 0;
1270		break;
1271
1272	default:
1273		/*
1274		 * DMA port is the only thing available when the switch
1275		 * is in safe mode.
1276		 */
1277		if (!sw->safe_mode)
1278			return 0;
1279		break;
1280	}
1281
1282	if (sw->no_nvm_upgrade)
1283		return 0;
1284
1285	sw->dma_port = dma_port_alloc(sw);
1286	if (!sw->dma_port)
1287		return 0;
1288
1289	/*
1290	 * Check status of the previous flash authentication. If there
1291	 * is one we need to power cycle the switch in any case to make
1292	 * it functional again.
1293	 */
1294	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
1295	if (ret <= 0)
1296		return ret;
1297
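    	/*
    	 * A non-zero status here means the previous NVM authentication
    	 * failed; record it before the switch is power cycled.
    	 */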
1298	if (status) {
1299		tb_sw_info(sw, "switch flash authentication failed\n");
1300		tb_switch_set_uuid(sw);
1301		nvm_set_auth_status(sw, status);
1302	}
1303
1304	tb_sw_info(sw, "power cycling the switch now\n");
1305	dma_port_power_cycle(sw->dma_port);
1306
1307	/*
 1308	 * We return an error here, which makes adding the switch fail.
 1309	 * It should appear back after the power cycle is complete.
1310	 */
1311	return -ESHUTDOWN;
1312}
1313
1314/**
1315 * tb_switch_add() - Add a switch to the domain
1316 * @sw: Switch to add
1317 *
 1318 * This is the last step in adding a switch to the domain. It will read
 1319 * identification information from the DROM and initialize ports so that
1320 * they can be used to connect other switches. The switch will be
1321 * exposed to the userspace when this function successfully returns. To
1322 * remove and release the switch, call tb_switch_remove().
1323 *
1324 * Return: %0 in case of success and negative errno in case of failure
1325 */
1326int tb_switch_add(struct tb_switch *sw)
1327{
1328	int i, ret;
1329
1330	/*
1331	 * Initialize DMA control port now before we read DROM. Recent
 1332	 * host controllers have a more complete DROM in NVM that includes
 1333	 * vendor and model identification strings, which we then expose
 1334	 * to userspace. The NVM can be accessed through the DMA
 1335	 * configuration based mailbox.
1336	 */
1337	ret = tb_switch_add_dma_port(sw);
1338	if (ret)
1339		return ret;
1340
1341	if (!sw->safe_mode) {
1342		/* read drom */
1343		ret = tb_drom_read(sw);
1344		if (ret) {
1345			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
1346			return ret;
1347		}
1348		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
1349
1350		tb_switch_set_uuid(sw);
1351
1352		for (i = 0; i <= sw->config.max_port_number; i++) {
1353			if (sw->ports[i].disabled) {
1354				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
1355				continue;
1356			}
1357			ret = tb_init_port(&sw->ports[i]);
1358			if (ret)
1359				return ret;
1360		}
1361	}
1362
1363	ret = device_add(&sw->dev);
1364	if (ret)
1365		return ret;
1366
1367	ret = tb_switch_nvm_add(sw);
1368	if (ret)
1369		device_del(&sw->dev);
1370
1371	return ret;
1372}
1373
1374/**
1375 * tb_switch_remove() - Remove and release a switch
1376 * @sw: Switch to remove
1377 *
 1378 * This will remove the switch from the domain and release it once its
 1379 * reference count drops to zero. If there are switches connected below
1380 * this switch, they will be removed as well.
1381 */
1382void tb_switch_remove(struct tb_switch *sw)
1383{
1384	int i;
1385
1386	/* port 0 is the switch itself and never has a remote */
1387	for (i = 1; i <= sw->config.max_port_number; i++) {
1388		if (tb_is_upstream_port(&sw->ports[i]))
1389			continue;
1390		if (sw->ports[i].remote)
1391			tb_switch_remove(sw->ports[i].remote->sw);
1392		sw->ports[i].remote = NULL;
1393		if (sw->ports[i].xdomain)
1394			tb_xdomain_remove(sw->ports[i].xdomain);
1395		sw->ports[i].xdomain = NULL;
1396	}
1397
1398	if (!sw->is_unplugged)
1399		tb_plug_events_active(sw, false);
1400
1401	tb_switch_nvm_remove(sw);
1402	device_unregister(&sw->dev);
1403}
1404
1405/**
1406 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
1407 */
1408void tb_sw_set_unplugged(struct tb_switch *sw)
1409{
1410	int i;
1411	if (sw == sw->tb->root_switch) {
1412		tb_sw_WARN(sw, "cannot unplug root switch\n");
1413		return;
1414	}
1415	if (sw->is_unplugged) {
1416		tb_sw_WARN(sw, "is_unplugged already set\n");
1417		return;
1418	}
1419	sw->is_unplugged = true;
1420	for (i = 0; i <= sw->config.max_port_number; i++) {
1421		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
1422			tb_sw_set_unplugged(sw->ports[i].remote->sw);
1423	}
1424}
1425
1426int tb_switch_resume(struct tb_switch *sw)
1427{
1428	int i, err;
1429	tb_sw_info(sw, "resuming switch\n");
1430
1431	/*
1432	 * Check for UID of the connected switches except for root
1433	 * switch which we assume cannot be removed.
1434	 */
1435	if (tb_route(sw)) {
1436		u64 uid;
1437
1438		err = tb_drom_read_uid_only(sw, &uid);
1439		if (err) {
1440			tb_sw_warn(sw, "uid read failed\n");
1441			return err;
1442		}
1443		if (sw->uid != uid) {
1444			tb_sw_info(sw,
1445				"changed while suspended (uid %#llx -> %#llx)\n",
1446				sw->uid, uid);
1447			return -ENODEV;
1448		}
1449	}
1450
1451	/* upload configuration */
1452	err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
1453	if (err)
1454		return err;
1455
1456	err = tb_plug_events_active(sw, true);
1457	if (err)
1458		return err;
1459
1460	/* check for surviving downstream switches */
1461	for (i = 1; i <= sw->config.max_port_number; i++) {
1462		struct tb_port *port = &sw->ports[i];
1463		if (tb_is_upstream_port(port))
1464			continue;
1465		if (!port->remote)
1466			continue;
1467		if (tb_wait_for_port(port, true) <= 0
1468			|| tb_switch_resume(port->remote->sw)) {
1469			tb_port_warn(port,
1470				     "lost during suspend, disconnecting\n");
1471			tb_sw_set_unplugged(port->remote->sw);
1472		}
1473	}
1474	return 0;
1475}
1476
1477void tb_switch_suspend(struct tb_switch *sw)
1478{
1479	int i, err;
1480	err = tb_plug_events_active(sw, false);
1481	if (err)
1482		return;
1483
1484	for (i = 1; i <= sw->config.max_port_number; i++) {
1485		if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
1486			tb_switch_suspend(sw->ports[i].remote->sw);
1487	}
1488	/*
1489	 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
1490	 * effect?
1491	 */
1492}
1493
1494struct tb_sw_lookup {
1495	struct tb *tb;
1496	u8 link;
1497	u8 depth;
1498	const uuid_t *uuid;
1499	u64 route;
1500};
1501
1502static int tb_switch_match(struct device *dev, void *data)
1503{
1504	struct tb_switch *sw = tb_to_switch(dev);
1505	struct tb_sw_lookup *lookup = data;
1506
1507	if (!sw)
1508		return 0;
1509	if (sw->tb != lookup->tb)
1510		return 0;
1511
1512	if (lookup->uuid)
1513		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
1514
1515	if (lookup->route) {
1516		return sw->config.route_lo == lower_32_bits(lookup->route) &&
1517		       sw->config.route_hi == upper_32_bits(lookup->route);
1518	}
1519
1520	/* Root switch is matched only by depth */
1521	if (!lookup->depth)
1522		return !sw->depth;
1523
1524	return sw->link == lookup->link && sw->depth == lookup->depth;
1525}
1526
1527/**
1528 * tb_switch_find_by_link_depth() - Find switch by link and depth
1529 * @tb: Domain the switch belongs
1530 * @link: Link number the switch is connected
1531 * @depth: Depth of the switch in link
1532 *
1533 * Returned switch has reference count increased so the caller needs to
1534 * call tb_switch_put() when done with the switch.
1535 */
1536struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
1537{
1538	struct tb_sw_lookup lookup;
1539	struct device *dev;
1540
1541	memset(&lookup, 0, sizeof(lookup));
1542	lookup.tb = tb;
1543	lookup.link = link;
1544	lookup.depth = depth;
1545
1546	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
1547	if (dev)
1548		return tb_to_switch(dev);
1549
1550	return NULL;
1551}
1552
1553/**
1554 * tb_switch_find_by_uuid() - Find switch by UUID
1555 * @tb: Domain the switch belongs
1556 * @uuid: UUID to look for
1557 *
1558 * Returned switch has reference count increased so the caller needs to
1559 * call tb_switch_put() when done with the switch.
1560 */
1561struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
1562{
1563	struct tb_sw_lookup lookup;
1564	struct device *dev;
1565
1566	memset(&lookup, 0, sizeof(lookup));
1567	lookup.tb = tb;
1568	lookup.uuid = uuid;
1569
1570	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
1571	if (dev)
1572		return tb_to_switch(dev);
1573
1574	return NULL;
1575}
1576
1577/**
1578 * tb_switch_find_by_route() - Find switch by route string
1579 * @tb: Domain the switch belongs
1580 * @route: Route string to look for
1581 *
1582 * Returned switch has reference count increased so the caller needs to
1583 * call tb_switch_put() when done with the switch.
1584 */
1585struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
1586{
1587	struct tb_sw_lookup lookup;
1588	struct device *dev;
1589
1590	if (!route)
1591		return tb_switch_get(tb->root_switch);
1592
1593	memset(&lookup, 0, sizeof(lookup));
1594	lookup.tb = tb;
1595	lookup.route = route;
1596
1597	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
1598	if (dev)
1599		return tb_to_switch(dev);
1600
1601	return NULL;
1602}
1603
1604void tb_switch_exit(void)
1605{
1606	ida_destroy(&nvm_ida);
1607}