// SPDX-License-Identifier: GPL-2.0
/*
 * Internal Thunderbolt Connection Manager. This is a firmware running on
 * the Thunderbolt host controller performing most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_RETRIES			3
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4

static bool start_icm;
module_param(start_icm, bool, 0444);
MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
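/*
 * Editorial note (not part of the original source): mode 0444 makes the
 * parameter read-only at runtime, so it has to be given at module load
 * time, e.g. (assuming the usual "thunderbolt" module name):
 *
 *	modprobe thunderbolt start_icm=1
 *
 * or on the kernel command line as thunderbolt.start_icm=1.
 */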

/**
 * struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status
 * @reply: Reply from ICM firmware is placed here
 * @request: Request that is sent to ICM firmware
 * @icm: Pointer to ICM private data
 */
struct usb4_switch_nvm_auth {
	struct icm_usb4_switch_op_response reply;
	struct icm_usb4_switch_op request;
	struct icm *icm;
};

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
 * @proto_version: Firmware protocol version
 * @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set)
 * @veto: Is RTD3 veto in effect
 * @is_supported: Checks if we can support ICM on this controller
 * @cio_reset: Trigger CIO reset
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @set_uuid: Set UUID for the root switch (optional)
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 * @rtd3_veto: Handle RTD3 veto notification ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	int vnd_cap;
	bool safe_mode;
	size_t max_boot_acl;
	bool rpm;
	bool can_upgrade_nvm;
	u8 proto_version;
	struct usb4_switch_nvm_auth *last_nvm_auth;
	bool veto;
	bool (*is_supported)(struct tb *tb);
	int (*cio_reset)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*save_devices)(struct tb *tb);
	int (*driver_ready)(struct tb *tb,
			    enum tb_security_level *security_level,
			    u8 *proto_version, size_t *nboot_acl, bool *rpm);
	void (*set_uuid)(struct tb *tb);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
	void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};

struct ep_name_entry {
	u8 len;
	u8 type;
	u8 data[];
};

#define EP_NAME_INTEL_VSS	0x10

/* Intel Vendor specific structure */
struct intel_vss {
	u16 vendor;
	u16 model;
	u8 mc;
	u8 flags;
	u16 pci_devid;
	u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3	BIT(0)

static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
	const void *end = ep_name + size;

	while (ep_name < end) {
		const struct ep_name_entry *ep = ep_name;

		if (!ep->len)
			break;
		if (ep_name + ep->len > end)
			break;

		if (ep->type == EP_NAME_INTEL_VSS)
			return (const struct intel_vss *)ep->data;

		ep_name += ep->len;
	}

	return NULL;
}

static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
{
	const struct intel_vss *vss;

	vss = parse_intel_vss(ep_name, size);
	if (vss)
		return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

	return false;
}

static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	u8 link;

	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);
}

static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

static inline u64 get_parent_route(u64 route)
{
	int depth = tb_route_length(route);
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}
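/*
 * Worked example (editorial addition, not in the original source): a
 * route string packs one byte per hop, lowest byte first, so for
 * route = 0x030201 the depth is 3 and masking off the last hop gives
 * get_parent_route(0x030201) == 0x0201. Links are 1-based and paired
 * per physical port: dual_link_from_link(1) == 2 and
 * dual_link_from_link(2) == 1, both lanes of phy port 0.
 */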

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}
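/*
 * Illustrative note (editorial addition, not in the original source):
 * replies that span several packets are scattered into the caller's
 * buffer by packet_id. For example, with npackets == 3 and
 * response_size == 256, the fragment with packet_id 1 lands at offset
 * 256, and icm_copy() returns true only for the final fragment
 * (packet_id == total_packets - 1), completing the request.
 */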

static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       int retries, unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}
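/*
 * Usage sketch (editorial addition; it mirrors the pattern the rest of
 * this file follows rather than adding new functionality):
 *
 *	struct icm_pkg_driver_ready request = {
 *		.hdr.code = ICM_DRIVER_READY,
 *	};
 *	struct icm_fr_pkg_driver_ready_response reply;
 *	int ret;
 *
 *	memset(&reply, 0, sizeof(reply));
 *	ret = icm_request(tb, &request, sizeof(request), &reply,
 *			  sizeof(reply), 1, ICM_RETRIES, ICM_TIMEOUT);
 *
 * Single-packet replies pass npackets == 1; multi-packet ones, such as
 * ICM_GET_TOPOLOGY below, pass the expected packet count instead.
 */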

/*
 * If rescan is queued to run (we are resuming), postpone it to give the
 * firmware some more time to send device connected notifications for the
 * next devices in the chain.
 */
static void icm_postpone_rescan(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (delayed_work_pending(&icm->rescan_work))
		mod_delayed_work(tb->wq, &icm->rescan_work,
				 msecs_to_jiffies(500));
}

static void icm_veto_begin(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (!icm->veto) {
		icm->veto = true;
		/* Keep the domain powered while veto is in effect */
		pm_runtime_get(&tb->dev);
	}
}

static void icm_veto_end(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (icm->veto) {
		icm->veto = false;
		/* Allow the domain to suspend now */
		pm_runtime_mark_last_busy(&tb->dev);
		pm_runtime_put_autosuspend(&tb->dev);
	}
}

static bool icm_firmware_running(const struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_FW_STS);
	return !!(val & REG_FW_STS_ICM_EN);
}

static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}

static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}

static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];
	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
	struct icm_fr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

	return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = transmit_path;
	request.transmit_ring = transmit_ring;
	request.receive_path = receive_path;
	request.receive_ring = receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					   int transmit_path, int transmit_ring,
					   int receive_path, int receive_ring)
{
	u8 phy_port;
	u8 cmd;

	phy_port = tb_phy_port_from_link(xd->link);
	if (phy_port == 0)
		cmd = NHI_MAILBOX_DISCONNECT_PA;
	else
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
	return 0;
}

static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
				      const uuid_t *uuid)
{
	struct tb *tb = parent_sw->tb;
	struct tb_switch *sw;

	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
	if (IS_ERR(sw)) {
		tb_warn(tb, "failed to allocate switch at %llx\n", route);
		return sw;
	}

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
	if (!sw->uuid) {
		tb_switch_put(sw);
		return ERR_PTR(-ENOMEM);
	}

	init_completion(&sw->rpm_complete);
	return sw;
}

static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
{
	u64 route = tb_route(sw);
	int ret;

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	ret = tb_switch_add(sw);
	if (ret)
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;

	return ret;
}

static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id,
			  u8 connection_key, u8 link, u8 depth, bool boot)
{
	struct tb_switch *parent_sw = tb_switch_parent(sw);

	/* Disconnect from parent */
	tb_switch_downstream_port(sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->boot = boot;

	/* This switch still exists */
	sw->is_unplugged = false;

	/* Runtime resume is now complete */
	complete(&sw->rpm_complete);
}

static void remove_switch(struct tb_switch *sw)
{
	tb_switch_downstream_port(sw)->remote = NULL;
	tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	pm_runtime_get_sync(&sw->dev);

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		goto out;

	xd->link = link;
	xd->depth = depth;

	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	bool boot, dual_lane, speed_gen3;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;
	u8 link, depth;
	u64 route;
	int ret;

	icm_postpone_rescan(tb);

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
	dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
	speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
			link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		sw_phy_port = tb_phy_port_from_link(sw->link);
		phy_port = tb_phy_port_from_link(link);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that still are present. However, that
		 * information might have changed for example by the
		 * fact that a switch on a dual-link connection might
		 * have been enumerated using the other link now. Make
		 * sure our bookkeeping matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			/*
			 * It was enumerated through another link so update
			 * route string accordingly.
			 */
			if (sw->link != link) {
				ret = icm->get_route(tb, link, depth, &route);
				if (ret) {
					tb_err(tb, "failed to update route string for switch at %u.%u\n",
					       link, depth);
					tb_switch_put(sw);
					return;
				}
			} else {
				route = tb_route(sw);
			}

			update_switch(sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		tb_switch_put(parent_sw);
		return;
	}

	pm_runtime_get_sync(&parent_sw->dev);

	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
	if (!IS_ERR(sw)) {
		sw->connection_id = pkg->connection_id;
		sw->connection_key = pkg->connection_key;
		sw->link = link;
		sw->depth = depth;
		sw->authorized = authorized;
		sw->security_level = security_level;
		sw->boot = boot;
		sw->link_speed = speed_gen3 ? 20 : 10;
		sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
					     TB_LINK_WIDTH_SINGLE;
		sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));

		if (add_switch(parent_sw, sw))
			tb_switch_put(sw);
	}

	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);

	tb_switch_put(parent_sw);
}

static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	pm_runtime_get_sync(sw->dev.parent);

	remove_switch(sw);

	pm_runtime_mark_last_busy(sw->dev.parent);
	pm_runtime_put_autosuspend(sw->dev.parent);

	tb_switch_put(sw);
}

static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u8 link, depth;
	u64 route;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		/*
		 * If we find an existing XDomain connection remove it
		 * now. We need to go through login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * Look if there already exists an XDomain in the same place
	 * as the new one and in that case remove it because it is
	 * most likely another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection is through one or multiple devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

static int icm_tr_cio_reset(struct tb *tb)
{
	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
}

static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 10, 250);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
	if (proto_version)
		*proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
				ICM_TR_INFO_PROTO_VERSION_SHIFT;
	if (nboot_acl)
		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
				ICM_TR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

	return 0;
}

static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_approve_device request;
	struct icm_tr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_add_device_key_response reply;
	struct icm_tr_pkg_add_device_key request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_tr_pkg_challenge_device_response reply;
	struct icm_tr_pkg_challenge_device request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring)
{
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = transmit_path;
	request.transmit_ring = transmit_ring;
	request.receive_path = receive_path;
	request.receive_ring = receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
				    int stage)
{
	struct icm_tr_pkg_disconnect_xdomain_response reply;
	struct icm_tr_pkg_disconnect_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
	request.stage = stage;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					   int transmit_path, int transmit_ring,
					   int receive_path, int receive_ring)
{
	int ret;

	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
	if (ret)
		return ret;

	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
}

static void
__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
			  bool force_rtd3)
{
	const struct icm_tr_event_device_connected *pkg =
		(const struct icm_tr_event_device_connected *)hdr;
	bool authorized, boot, dual_lane, speed_gen3;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct tb_xdomain *xd;
	u64 route;

	icm_postpone_rescan(tb);

	/*
	 * Currently we don't use the QoS information coming with the
	 * device connected message so simply ignore that extra packet
	 * for now.
	 */
	if (pkg->hdr.packet_id)
		return;

	route = get_route(pkg->route_hi, pkg->route_lo);
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
	dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
	speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
			route);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		/* Update the switch if it is still in the same place */
		if (tb_route(sw) == route && !!sw->authorized == authorized) {
			update_switch(sw, route, pkg->connection_id, 0, 0, 0,
				      boot);
			tb_switch_put(sw);
			return;
		}

		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Another switch with the same address */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* XDomain connection with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %llx\n", route);
		return;
	}

	pm_runtime_get_sync(&parent_sw->dev);

	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
	if (!IS_ERR(sw)) {
		sw->connection_id = pkg->connection_id;
		sw->authorized = authorized;
		sw->security_level = security_level;
		sw->boot = boot;
		sw->link_speed = speed_gen3 ? 20 : 10;
		sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
					     TB_LINK_WIDTH_SINGLE;
		sw->rpm = force_rtd3;
		if (!sw->rpm)
			sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
						    sizeof(pkg->ep_name));

		if (add_switch(parent_sw, sw))
			tb_switch_put(sw);
	}

	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);

	tb_switch_put(parent_sw);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	__icm_tr_device_connected(tb, hdr, false);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_disconnected *pkg =
		(const struct icm_tr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	sw = tb_switch_find_by_route(tb, route);
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}
	pm_runtime_get_sync(sw->dev.parent);

	remove_switch(sw);

	pm_runtime_mark_last_busy(sw->dev.parent);
	pm_runtime_put_autosuspend(sw->dev.parent);

	tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_connected *pkg =
		(const struct icm_tr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u64 route;

	if (!tb->root_switch)
		return;

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		if (xd->route == route) {
			update_xdomain(xd, route, 0);
			tb_xdomain_put(xd);
			return;
		}

		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/* An existing xdomain with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_disconnected *pkg =
		(const struct icm_tr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 * However, only start it if explicitly asked by the user.
	 */
	if (icm_firmware_running(tb->nhi))
		return true;
	if (!start_icm)
		return false;

	/*
	 * Find the upstream PCIe port in case we need to do reset
	 * through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

static int icm_ar_cio_reset(struct tb *tb)
{
	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 60;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(50);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
	struct icm_ar_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
				ICM_AR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

	return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = { .code = ICM_PREBOOT_ACL },
	};
	int ret, i;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	for (i = 0; i < nuuids; i++) {
		u32 *uuid = (u32 *)&uuids[i];

		uuid[0] = reply.acl[i].uuid_lo;
		uuid[1] = reply.acl[i].uuid_hi;

		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
			/* Map empty entries to null UUID */
			uuid[0] = 0;
			uuid[1] = 0;
		} else if (uuid[0] != 0 || uuid[1] != 0) {
			/* Upper two DWs are always ones */
			uuid[2] = 0xffffffff;
			uuid[3] = 0xffffffff;
		}
	}

	return ret;
}

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
			       size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = {
			.code = ICM_PREBOOT_ACL,
			.flags = ICM_FLAGS_WRITE,
		},
	};
	int ret, i;

	for (i = 0; i < nuuids; i++) {
		const u32 *uuid = (const u32 *)&uuids[i];

		if (uuid_is_null(&uuids[i])) {
			/*
			 * Map null UUID to the empty (all ones) entries
			 * for ICM.
			 */
			request.acl[i].uuid_lo = 0xffffffff;
			request.acl[i].uuid_hi = 0xffffffff;
		} else {
			/* Two high DWs need to be set to all ones */
			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
				return -EINVAL;

			request.acl[i].uuid_lo = uuid[0];
			request.acl[i].uuid_hi = uuid[1];
		}
	}

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int
icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		     u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_RETRIES, 20000);
	if (ret)
		return ret;

	if (proto_version)
		*proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
				ICM_TR_INFO_PROTO_VERSION_SHIFT;

	/* Ice Lake always supports RTD3 */
	if (rpm)
		*rpm = true;

	return 0;
}

static void icm_icl_set_uuid(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	u32 uuid[4];

	pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
	pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
	uuid[2] = 0xffffffff;
	uuid[3] = 0xffffffff;

	tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

static void
icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	__icm_tr_device_connected(tb, hdr, true);
}

static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_icl_event_rtd3_veto *pkg =
		(const struct icm_icl_event_rtd3_veto *)hdr;

	tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);

	if (pkg->veto_reason)
		icm_veto_begin(tb);
	else
		icm_veto_end(tb);
}

static bool icm_tgl_is_supported(struct tb *tb)
{
	unsigned long end = jiffies + msecs_to_jiffies(10);

	do {
		u32 val;

		val = ioread32(tb->nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return true;
		usleep_range(100, 500);
	} while (time_before(jiffies, end));

	return false;
}

static void icm_handle_notification(struct work_struct *work)
{
	struct icm_notification *n = container_of(work, typeof(*n), work);
	struct tb *tb = n->tb;
	struct icm *icm = tb_priv(tb);

	mutex_lock(&tb->lock);

	/*
	 * When the domain is stopped we flush its workqueue but before
	 * that the root switch is removed. In that case we should treat
	 * the queued events as being canceled.
	 */
	if (tb->root_switch) {
		switch (n->pkg->code) {
		case ICM_EVENT_DEVICE_CONNECTED:
			icm->device_connected(tb, n->pkg);
			break;
		case ICM_EVENT_DEVICE_DISCONNECTED:
			icm->device_disconnected(tb, n->pkg);
			break;
		case ICM_EVENT_XDOMAIN_CONNECTED:
			if (tb_is_xdomain_enabled())
				icm->xdomain_connected(tb, n->pkg);
			break;
		case ICM_EVENT_XDOMAIN_DISCONNECTED:
			if (tb_is_xdomain_enabled())
				icm->xdomain_disconnected(tb, n->pkg);
			break;
		case ICM_EVENT_RTD3_VETO:
			icm->rtd3_veto(tb, n->pkg);
			break;
		}
	}

	mutex_unlock(&tb->lock);

	kfree(n->pkg);
	kfree(n);
}

static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	struct icm_notification *n;

	n = kmalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return;

	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	if (!n->pkg) {
		kfree(n);
		return;
	}

	INIT_WORK(&n->work, icm_handle_notification);
	n->tb = tb;

	queue_work(tb->wq, &n->work);
}

static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		   u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
	struct icm *icm = tb_priv(tb);
	unsigned int retries = 50;
	int ret;

	ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl,
				rpm);
	if (ret) {
		tb_err(tb, "failed to send driver ready to ICM\n");
		return ret;
	}

	/*
	 * Hold on here until the switch config space is accessible so
	 * that we can read root switch config successfully.
	 */
	do {
		struct tb_cfg_result res;
		u32 tmp;

		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
				      0, 1, 100);
		if (!res.err)
			return 0;

		msleep(50);
	} while (--retries);

	tb_err(tb, "failed to read root switch config space, giving up\n");
	return -ETIMEDOUT;
}
static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
	struct icm *icm = tb_priv(tb);
	u32 val;

	if (!icm->upstream_port)
		return -ENODEV;

	/* Put ARC to wait for CIO reset event to happen */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_CIO_RESET_REQ;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Re-start ARC */
	val = ioread32(nhi->iobase + REG_FW_STS);
	val |= REG_FW_STS_ICM_EN_INVERT;
	val |= REG_FW_STS_ICM_EN_CPU;
	iowrite32(val, nhi->iobase + REG_FW_STS);

	/* Trigger CIO reset now */
	return icm->cio_reset(tb);
}

static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
	unsigned int retries = 10;
	int ret;
	u32 val;

	/* Check if the ICM firmware is already running */
	if (icm_firmware_running(nhi))
		return 0;

	dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");

	ret = icm_firmware_reset(tb, nhi);
	if (ret)
		return ret;

	/* Wait until the ICM firmware tells us it is up and running */
	do {
		/* Check that the ICM firmware is running */
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			return 0;

		msleep(300);
	} while (--retries);

	return -ETIMEDOUT;
}

static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
	struct icm *icm = tb_priv(tb);
	u32 state0, state1;
	int port0, port1;
	u32 val0, val1;
	int ret;

	if (!icm->upstream_port)
		return 0;

	if (phy_port) {
		port0 = 3;
		port1 = 4;
	} else {
		port0 = 1;
		port1 = 2;
	}

	/*
	 * Read link status of both null ports belonging to a single
	 * physical port.
	 */
	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;

	/* If they are both up we need to reset them now */
	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
		return 0;

	val0 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 |= PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
	if (ret)
		return ret;

	/* Wait a bit and then re-enable both ports */
	usleep_range(10, 100);

	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
	if (ret)
		return ret;
	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
	if (ret)
		return ret;

	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
	if (ret)
		return ret;

	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}

static int icm_firmware_init(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = icm_firmware_start(tb, nhi);
	if (ret) {
		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
		return ret;
	}

	if (icm->get_mode) {
		ret = icm->get_mode(tb);

		switch (ret) {
		case NHI_FW_SAFE_MODE:
			icm->safe_mode = true;
			break;

		case NHI_FW_CM_MODE:
			/* Ask ICM to accept all Thunderbolt devices */
			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
			break;

		default:
			if (ret < 0)
				return ret;

			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
			return -ENODEV;
		}
	}

	/*
	 * Reset both physical ports if there is anything connected to
	 * them already.
	 */
	ret = icm_reset_phy_port(tb, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
	ret = icm_reset_phy_port(tb, 1);
	if (ret)
		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");

	return 0;
}
1956
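/*
 * First part of the driver ready handshake. On success the domain
 * security level, firmware protocol version and number of preboot ACL
 * entries are filled in from the firmware response.
 */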
1957static int icm_driver_ready(struct tb *tb)
1958{
1959	struct icm *icm = tb_priv(tb);
1960	int ret;
1961
1962	ret = icm_firmware_init(tb);
1963	if (ret)
1964		return ret;
1965
1966	if (icm->safe_mode) {
1967		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
1968		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
1969		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
1970		return 0;
1971	}
1972
1973	ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version,
1974				 &tb->nboot_acl, &icm->rpm);
1975	if (ret)
1976		return ret;
1977
1978	/*
1979	 * Make sure the number of supported preboot ACL entries matches
1980	 * what we expect or disable the whole feature.
1981	 */
1982	if (tb->nboot_acl > icm->max_boot_acl)
1983		tb->nboot_acl = 0;
1984
1985	if (icm->proto_version >= 3)
1986		tb_dbg(tb, "USB4 proxy operations supported\n");
1987
1988	return 0;
1989}
1990
1991static int icm_suspend(struct tb *tb)
1992{
1993	struct icm *icm = tb_priv(tb);
1994
1995	if (icm->save_devices)
1996		icm->save_devices(tb);
1997
1998	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1999	return 0;
2000}
2001
2002/*
2003 * Mark all switches (except root switch) below this one unplugged. ICM
2004 * firmware will send us an updated list of switches after we have sent
2005 * it the driver ready command. If a switch is not in that list it will
2006 * be removed when we perform the rescan.
2007 */
2008static void icm_unplug_children(struct tb_switch *sw)
2009{
2010	struct tb_port *port;
2011
2012	if (tb_route(sw))
2013		sw->is_unplugged = true;
2014
2015	tb_switch_for_each_port(sw, port) {
2016		if (port->xdomain)
2017			port->xdomain->is_unplugged = true;
2018		else if (tb_port_has_remote(port))
2019			icm_unplug_children(port->remote->sw);
2020	}
2021}
2022
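/*
 * bus_for_each_dev() callback: signal rpm_complete for each switch so
 * that the pm_runtime_get_sync() call in tb_switch_remove() does not
 * block waiting for the now unplugged switch to runtime resume.
 */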
2023static int complete_rpm(struct device *dev, void *data)
2024{
2025	struct tb_switch *sw = tb_to_switch(dev);
2026
2027	if (sw)
2028		complete(&sw->rpm_complete);
2029	return 0;
2030}
2031
2032static void remove_unplugged_switch(struct tb_switch *sw)
2033{
2034	struct device *parent = get_device(sw->dev.parent);
2035
2036	pm_runtime_get_sync(parent);
2037
2038	/*
2039	 * Signal this and switches below for rpm_complete because
2040	 * tb_switch_remove() calls pm_runtime_get_sync() that then waits
2041	 * for it.
2042	 */
2043	complete_rpm(&sw->dev, NULL);
2044	bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
2045	tb_switch_remove(sw);
2046
2047	pm_runtime_mark_last_busy(parent);
2048	pm_runtime_put_autosuspend(parent);
2049
2050	put_device(parent);
2051}
2052
2053static void icm_free_unplugged_children(struct tb_switch *sw)
2054{
2055	struct tb_port *port;
2056
2057	tb_switch_for_each_port(sw, port) {
2058		if (port->xdomain && port->xdomain->is_unplugged) {
2059			tb_xdomain_remove(port->xdomain);
2060			port->xdomain = NULL;
2061		} else if (tb_port_has_remote(port)) {
2062			if (port->remote->sw->is_unplugged) {
2063				remove_unplugged_switch(port->remote->sw);
2064				port->remote = NULL;
2065			} else {
2066				icm_free_unplugged_children(port->remote->sw);
2067			}
2068		}
2069	}
2070}
2071
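/*
 * Scheduled from icm_complete() half a second after resume. Removes
 * the children that were marked unplugged and were not listed again in
 * the connected events sent by ICM.
 */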
2072static void icm_rescan_work(struct work_struct *work)
2073{
2074	struct icm *icm = container_of(work, struct icm, rescan_work.work);
2075	struct tb *tb = icm_to_tb(icm);
2076
2077	mutex_lock(&tb->lock);
2078	if (tb->root_switch)
2079		icm_free_unplugged_children(tb->root_switch);
2080	mutex_unlock(&tb->lock);
2081}
2082
2083static void icm_complete(struct tb *tb)
2084{
2085	struct icm *icm = tb_priv(tb);
2086
2087	if (tb->nhi->going_away)
2088		return;
2089
2090	/*
2091	 * If RTD3 was vetoed before we entered system suspend, allow it
2092	 * again now before driver ready is sent. Firmware sends a new
2093	 * RTD3 veto if it is still needed after we have sent it the
2094	 * driver ready command.
2095	 */
2096	icm_veto_end(tb);
2097	icm_unplug_children(tb->root_switch);
2098
2099	/*
2100	 * Now all existing children should be resumed. Start events
2101	 * from ICM to get updated status.
2102	 */
2103	__icm_driver_ready(tb, NULL, NULL, NULL, NULL);
2104
2105	/*
2106	 * We do not get notifications of devices that have been
2107	 * unplugged during suspend, so schedule a rescan to clean
2108	 * them up if there are any.
2109	 */
2110	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
2111}
2112
2113static int icm_runtime_suspend(struct tb *tb)
2114{
2115	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2116	return 0;
2117}
2118
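/*
 * The rpm_complete completion tracks when the firmware has announced a
 * runtime resumed switch again: it is re-armed on runtime suspend and
 * waited on (up to 500 ms) on runtime resume.
 */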
2119static int icm_runtime_suspend_switch(struct tb_switch *sw)
2120{
2121	if (tb_route(sw))
2122		reinit_completion(&sw->rpm_complete);
2123	return 0;
2124}
2125
2126static int icm_runtime_resume_switch(struct tb_switch *sw)
2127{
2128	if (tb_route(sw)) {
2129		if (!wait_for_completion_timeout(&sw->rpm_complete,
2130						 msecs_to_jiffies(500))) {
2131			dev_dbg(&sw->dev, "runtime resuming timed out\n");
2132		}
2133	}
2134	return 0;
2135}
2136
2137static int icm_runtime_resume(struct tb *tb)
2138{
2139	/*
2140	 * We can reuse the same resume functionality as with system
2141	 * suspend.
2142	 */
2143	icm_complete(tb);
2144	return 0;
2145}
2146
2147static int icm_start(struct tb *tb)
2148{
2149	struct icm *icm = tb_priv(tb);
2150	int ret;
2151
2152	if (icm->safe_mode)
2153		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
2154	else
2155		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
2156	if (IS_ERR(tb->root_switch))
2157		return PTR_ERR(tb->root_switch);
2158
2159	tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
2160	tb->root_switch->rpm = icm->rpm;
2161
2162	if (icm->set_uuid)
2163		icm->set_uuid(tb);
2164
2165	ret = tb_switch_add(tb->root_switch);
2166	if (ret) {
2167		tb_switch_put(tb->root_switch);
2168		tb->root_switch = NULL;
2169	}
2170
2171	return ret;
2172}
2173
2174static void icm_stop(struct tb *tb)
2175{
2176	struct icm *icm = tb_priv(tb);
2177
2178	cancel_delayed_work(&icm->rescan_work);
2179	tb_switch_remove(tb->root_switch);
2180	tb->root_switch = NULL;
2181	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
2182	kfree(icm->last_nvm_auth);
2183	icm->last_nvm_auth = NULL;
2184}
2185
2186static int icm_disconnect_pcie_paths(struct tb *tb)
2187{
2188	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
2189}
2190
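/*
 * Completion callback for the asynchronous NVM_AUTH request below. The
 * reply is stashed in @last_nvm_auth so that a later call to
 * icm_usb4_switch_nvm_authenticate_status() can pick it up.
 */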
2191static void icm_usb4_switch_nvm_auth_complete(void *data)
2192{
2193	struct usb4_switch_nvm_auth *auth = data;
2194	struct icm *icm = auth->icm;
2195	struct tb *tb = icm_to_tb(icm);
2196
2197	tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n",
2198	       get_route(auth->reply.route_hi, auth->reply.route_lo),
2199	       auth->reply.hdr.flags, auth->reply.status);
2200
2201	mutex_lock(&tb->lock);
2202	if (WARN_ON(icm->last_nvm_auth))
2203		kfree(icm->last_nvm_auth);
2204	icm->last_nvm_auth = auth;
2205	mutex_unlock(&tb->lock);
2206}
2207
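/*
 * NVM_AUTH does not complete immediately, so instead of the synchronous
 * icm_request() the request is sent through tb_cfg_request() and the
 * eventual reply is stored by the completion callback above. This path
 * is typically reached through the router's nvm_authenticate sysfs
 * attribute.
 */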
2208static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route)
2209{
2210	struct usb4_switch_nvm_auth *auth;
2211	struct icm *icm = tb_priv(tb);
2212	struct tb_cfg_request *req;
2213	int ret;
2214
2215	auth = kzalloc(sizeof(*auth), GFP_KERNEL);
2216	if (!auth)
2217		return -ENOMEM;
2218
2219	auth->icm = icm;
2220	auth->request.hdr.code = ICM_USB4_SWITCH_OP;
2221	auth->request.route_hi = upper_32_bits(route);
2222	auth->request.route_lo = lower_32_bits(route);
2223	auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH;
2224
2225	req = tb_cfg_request_alloc();
2226	if (!req) {
2227		ret = -ENOMEM;
2228		goto err_free_auth;
2229	}
2230
2231	req->match = icm_match;
2232	req->copy = icm_copy;
2233	req->request = &auth->request;
2234	req->request_size = sizeof(auth->request);
2235	req->request_type = TB_CFG_PKG_ICM_CMD;
2236	req->response = &auth->reply;
2237	req->npackets = 1;
2238	req->response_size = sizeof(auth->reply);
2239	req->response_type = TB_CFG_PKG_ICM_RESP;
2240
2241	tb_dbg(tb, "NVM_AUTH request for %llx\n", route);
2242
2243	mutex_lock(&icm->request_lock);
2244	ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete,
2245			     auth);
2246	mutex_unlock(&icm->request_lock);
2247
2248	tb_cfg_request_put(req);
2249	if (ret)
2250		goto err_free_auth;
2251	return 0;
2252
2253err_free_auth:
2254	kfree(auth);
2255	return ret;
2256}
2257
2258static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
2259			      u8 *status, const void *tx_data, size_t tx_data_len,
2260			      void *rx_data, size_t rx_data_len)
2261{
2262	struct icm_usb4_switch_op_response reply;
2263	struct icm_usb4_switch_op request;
2264	struct tb *tb = sw->tb;
2265	struct icm *icm = tb_priv(tb);
2266	u64 route = tb_route(sw);
2267	int ret;
2268
2269	/*
2270	 * USB4 router operation proxy is supported in firmware if the
2271	 * protocol version is 3 or higher.
2272	 */
2273	if (icm->proto_version < 3)
2274		return -EOPNOTSUPP;
2275
2276	/*
2277	 * NVM_AUTH is a special USB4 proxy operation that does not
2278	 * return immediately so handle it separately.
2279	 */
2280	if (opcode == USB4_SWITCH_OP_NVM_AUTH)
2281		return icm_usb4_switch_nvm_authenticate(tb, route);
2282
2283	memset(&request, 0, sizeof(request));
2284	request.hdr.code = ICM_USB4_SWITCH_OP;
2285	request.route_hi = upper_32_bits(route);
2286	request.route_lo = lower_32_bits(route);
2287	request.opcode = opcode;
2288	if (metadata)
2289		request.metadata = *metadata;
2290
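	/*
	 * Pass the TX data along with the request. The data valid flag
	 * and, for partially filled buffers, the dword count share the
	 * same data_len_valid field.
	 */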
2291	if (tx_data_len) {
2292		request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID;
2293		if (tx_data_len < ARRAY_SIZE(request.data))
2294			request.data_len_valid |=
2295				tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK;
2296		memcpy(request.data, tx_data, tx_data_len * sizeof(u32));
2297	}
2298
2299	memset(&reply, 0, sizeof(reply));
2300	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
2301			  1, ICM_RETRIES, ICM_TIMEOUT);
2302	if (ret)
2303		return ret;
2304
2305	if (reply.hdr.flags & ICM_FLAGS_ERROR)
2306		return -EIO;
2307
2308	if (status)
2309		*status = reply.status;
2310
2311	if (metadata)
2312		*metadata = reply.metadata;
2313
2314	if (rx_data_len)
2315		memcpy(rx_data, reply.data, rx_data_len * sizeof(u32));
2316
2317	return 0;
2318}
2319
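/*
 * Consume the NVM_AUTH reply stored by the completion callback. The
 * result is only valid if it belongs to this router (the route strings
 * match); otherwise report status 0 and free the stale entry.
 */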
2320static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
2321						   u32 *status)
2322{
2323	struct usb4_switch_nvm_auth *auth;
2324	struct tb *tb = sw->tb;
2325	struct icm *icm = tb_priv(tb);
2326	int ret = 0;
2327
2328	if (icm->proto_version < 3)
2329		return -EOPNOTSUPP;
2330
2331	auth = icm->last_nvm_auth;
2332	icm->last_nvm_auth = NULL;
2333
2334	if (auth && auth->reply.route_hi == sw->config.route_hi &&
2335	    auth->reply.route_lo == sw->config.route_lo) {
2336		tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
2337		       tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
2338		if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
2339			ret = -EIO;
2340		else
2341			*status = auth->reply.status;
2342	} else {
2343		*status = 0;
2344	}
2345
2346	kfree(auth);
2347	return ret;
2348}
2349
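/*
 * Per-generation connection manager operations. Falcon Ridge has no
 * runtime PM or preboot ACL callbacks, Alpine and Titan Ridge add
 * them, and the integrated Ice Lake controller omits the device
 * approval callbacks entirely.
 */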
2350/* Falcon Ridge */
2351static const struct tb_cm_ops icm_fr_ops = {
2352	.driver_ready = icm_driver_ready,
2353	.start = icm_start,
2354	.stop = icm_stop,
2355	.suspend = icm_suspend,
2356	.complete = icm_complete,
2357	.handle_event = icm_handle_event,
2358	.approve_switch = icm_fr_approve_switch,
2359	.add_switch_key = icm_fr_add_switch_key,
2360	.challenge_switch_key = icm_fr_challenge_switch_key,
2361	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
2362	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
2363	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
2364};
2365
2366/* Alpine Ridge */
2367static const struct tb_cm_ops icm_ar_ops = {
2368	.driver_ready = icm_driver_ready,
2369	.start = icm_start,
2370	.stop = icm_stop,
2371	.suspend = icm_suspend,
2372	.complete = icm_complete,
2373	.runtime_suspend = icm_runtime_suspend,
2374	.runtime_resume = icm_runtime_resume,
2375	.runtime_suspend_switch = icm_runtime_suspend_switch,
2376	.runtime_resume_switch = icm_runtime_resume_switch,
2377	.handle_event = icm_handle_event,
2378	.get_boot_acl = icm_ar_get_boot_acl,
2379	.set_boot_acl = icm_ar_set_boot_acl,
2380	.approve_switch = icm_fr_approve_switch,
2381	.add_switch_key = icm_fr_add_switch_key,
2382	.challenge_switch_key = icm_fr_challenge_switch_key,
2383	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
2384	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
2385	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
2386};
2387
2388/* Titan Ridge */
2389static const struct tb_cm_ops icm_tr_ops = {
2390	.driver_ready = icm_driver_ready,
2391	.start = icm_start,
2392	.stop = icm_stop,
2393	.suspend = icm_suspend,
2394	.complete = icm_complete,
2395	.runtime_suspend = icm_runtime_suspend,
2396	.runtime_resume = icm_runtime_resume,
2397	.runtime_suspend_switch = icm_runtime_suspend_switch,
2398	.runtime_resume_switch = icm_runtime_resume_switch,
2399	.handle_event = icm_handle_event,
2400	.get_boot_acl = icm_ar_get_boot_acl,
2401	.set_boot_acl = icm_ar_set_boot_acl,
2402	.approve_switch = icm_tr_approve_switch,
2403	.add_switch_key = icm_tr_add_switch_key,
2404	.challenge_switch_key = icm_tr_challenge_switch_key,
2405	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
2406	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2407	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2408	.usb4_switch_op = icm_usb4_switch_op,
2409	.usb4_switch_nvm_authenticate_status =
2410		icm_usb4_switch_nvm_authenticate_status,
2411};
2412
2413/* Ice Lake */
2414static const struct tb_cm_ops icm_icl_ops = {
2415	.driver_ready = icm_driver_ready,
2416	.start = icm_start,
2417	.stop = icm_stop,
2418	.complete = icm_complete,
2419	.runtime_suspend = icm_runtime_suspend,
2420	.runtime_resume = icm_runtime_resume,
2421	.handle_event = icm_handle_event,
2422	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
2423	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
2424	.usb4_switch_op = icm_usb4_switch_op,
2425	.usb4_switch_nvm_authenticate_status =
2426		icm_usb4_switch_nvm_authenticate_status,
2427};
2428
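/*
 * Entry point used by the NHI driver. Allocates the domain and wires
 * up the generation specific callbacks based on the NHI PCI device ID.
 * Returns %NULL when ICM cannot be used, so that the caller can fall
 * back to the software connection manager, roughly (sketch of the
 * caller in nhi.c):
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 */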
2429struct tb *icm_probe(struct tb_nhi *nhi)
2430{
2431	struct icm *icm;
2432	struct tb *tb;
2433
2434	tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm));
2435	if (!tb)
2436		return NULL;
2437
2438	icm = tb_priv(tb);
2439	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
2440	mutex_init(&icm->request_lock);
2441
2442	switch (nhi->pdev->device) {
2443	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
2444	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
2445		icm->can_upgrade_nvm = true;
2446		icm->is_supported = icm_fr_is_supported;
2447		icm->get_route = icm_fr_get_route;
2448		icm->save_devices = icm_fr_save_devices;
2449		icm->driver_ready = icm_fr_driver_ready;
2450		icm->device_connected = icm_fr_device_connected;
2451		icm->device_disconnected = icm_fr_device_disconnected;
2452		icm->xdomain_connected = icm_fr_xdomain_connected;
2453		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2454		tb->cm_ops = &icm_fr_ops;
2455		break;
2456
2457	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
2458	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
2459	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
2460	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
2461	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
2462		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2463		/*
2464		 * NVM upgrade has not been tested on Apple systems and
2465		 * they don't provide images publicly either. To be on
2466		 * the safe side prevent root switch NVM upgrade on Macs
2467		 * for now.
2468		 */
2469		icm->can_upgrade_nvm = !x86_apple_machine;
2470		icm->is_supported = icm_ar_is_supported;
2471		icm->cio_reset = icm_ar_cio_reset;
2472		icm->get_mode = icm_ar_get_mode;
2473		icm->get_route = icm_ar_get_route;
2474		icm->save_devices = icm_fr_save_devices;
2475		icm->driver_ready = icm_ar_driver_ready;
2476		icm->device_connected = icm_fr_device_connected;
2477		icm->device_disconnected = icm_fr_device_disconnected;
2478		icm->xdomain_connected = icm_fr_xdomain_connected;
2479		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
2480		tb->cm_ops = &icm_ar_ops;
2481		break;
2482
2483	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
2484	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
2485		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
2486		icm->can_upgrade_nvm = !x86_apple_machine;
2487		icm->is_supported = icm_ar_is_supported;
2488		icm->cio_reset = icm_tr_cio_reset;
2489		icm->get_mode = icm_ar_get_mode;
2490		icm->driver_ready = icm_tr_driver_ready;
2491		icm->device_connected = icm_tr_device_connected;
2492		icm->device_disconnected = icm_tr_device_disconnected;
2493		icm->xdomain_connected = icm_tr_xdomain_connected;
2494		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2495		tb->cm_ops = &icm_tr_ops;
2496		break;
2497
2498	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2499	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2500		icm->is_supported = icm_fr_is_supported;
2501		icm->driver_ready = icm_icl_driver_ready;
2502		icm->set_uuid = icm_icl_set_uuid;
2503		icm->device_connected = icm_icl_device_connected;
2504		icm->device_disconnected = icm_tr_device_disconnected;
2505		icm->xdomain_connected = icm_tr_xdomain_connected;
2506		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2507		icm->rtd3_veto = icm_icl_rtd3_veto;
2508		tb->cm_ops = &icm_icl_ops;
2509		break;
2510
2511	case PCI_DEVICE_ID_INTEL_TGL_NHI0:
2512	case PCI_DEVICE_ID_INTEL_TGL_NHI1:
2513	case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
2514	case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
2515	case PCI_DEVICE_ID_INTEL_ADL_NHI0:
2516	case PCI_DEVICE_ID_INTEL_ADL_NHI1:
2517	case PCI_DEVICE_ID_INTEL_RPL_NHI0:
2518	case PCI_DEVICE_ID_INTEL_RPL_NHI1:
2519	case PCI_DEVICE_ID_INTEL_MTL_M_NHI0:
2520	case PCI_DEVICE_ID_INTEL_MTL_P_NHI0:
2521	case PCI_DEVICE_ID_INTEL_MTL_P_NHI1:
2522		icm->is_supported = icm_tgl_is_supported;
2523		icm->driver_ready = icm_icl_driver_ready;
2524		icm->set_uuid = icm_icl_set_uuid;
2525		icm->device_connected = icm_icl_device_connected;
2526		icm->device_disconnected = icm_tr_device_disconnected;
2527		icm->xdomain_connected = icm_tr_xdomain_connected;
2528		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2529		icm->rtd3_veto = icm_icl_rtd3_veto;
2530		tb->cm_ops = &icm_icl_ops;
2531		break;
2532
2533	case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
2534	case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
2535		icm->is_supported = icm_tgl_is_supported;
2536		icm->get_mode = icm_ar_get_mode;
2537		icm->driver_ready = icm_tr_driver_ready;
2538		icm->device_connected = icm_tr_device_connected;
2539		icm->device_disconnected = icm_tr_device_disconnected;
2540		icm->xdomain_connected = icm_tr_xdomain_connected;
2541		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
2542		tb->cm_ops = &icm_tr_ops;
2543		break;
2544	}
2545
2546	if (!icm->is_supported || !icm->is_supported(tb)) {
2547		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
2548		tb_domain_put(tb);
2549		return NULL;
2550	}
2551
2552	tb_dbg(tb, "using firmware connection manager\n");
2553
2554	return tb;
2555}
v4.17
 
   1/*
   2 * Internal Thunderbolt Connection Manager. This is a firmware running on
   3 * the Thunderbolt host controller performing most of the low-level
   4 * handling.
   5 *
   6 * Copyright (C) 2017, Intel Corporation
   7 * Authors: Michael Jamet <michael.jamet@intel.com>
   8 *          Mika Westerberg <mika.westerberg@linux.intel.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#include <linux/delay.h>
  16#include <linux/mutex.h>
 
  17#include <linux/pci.h>
 
  18#include <linux/platform_data/x86/apple.h>
  19#include <linux/sizes.h>
  20#include <linux/slab.h>
  21#include <linux/workqueue.h>
  22
  23#include "ctl.h"
  24#include "nhi_regs.h"
  25#include "tb.h"
  26
  27#define PCIE2CIO_CMD			0x30
  28#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
  29#define PCIE2CIO_CMD_START		BIT(30)
  30#define PCIE2CIO_CMD_WRITE		BIT(21)
  31#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
  32#define PCIE2CIO_CMD_CS_SHIFT		19
  33#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
  34#define PCIE2CIO_CMD_PORT_SHIFT		13
  35
  36#define PCIE2CIO_WRDATA			0x34
  37#define PCIE2CIO_RDDATA			0x38
  38
  39#define PHY_PORT_CS1			0x37
  40#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
  41#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
  42#define PHY_PORT_CS1_LINK_STATE_SHIFT	26
  43
  44#define ICM_TIMEOUT			5000	/* ms */
 
  45#define ICM_APPROVE_TIMEOUT		10000	/* ms */
  46#define ICM_MAX_LINK			4
  47#define ICM_MAX_DEPTH			6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  48
  49/**
  50 * struct icm - Internal connection manager private data
  51 * @request_lock: Makes sure only one message is send to ICM at time
  52 * @rescan_work: Work used to rescan the surviving switches after resume
  53 * @upstream_port: Pointer to the PCIe upstream port this host
  54 *		   controller is connected. This is only set for systems
  55 *		   where ICM needs to be started manually
  56 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
  57 *	     (only set when @upstream_port is not %NULL)
  58 * @safe_mode: ICM is in safe mode
  59 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 
 
 
 
 
  60 * @is_supported: Checks if we can support ICM on this controller
 
  61 * @get_mode: Read and return the ICM firmware mode (optional)
  62 * @get_route: Find a route string for given switch
 
  63 * @driver_ready: Send driver ready message to ICM
 
  64 * @device_connected: Handle device connected ICM message
  65 * @device_disconnected: Handle device disconnected ICM message
  66 * @xdomain_connected - Handle XDomain connected ICM message
  67 * @xdomain_disconnected - Handle XDomain disconnected ICM message
 
  68 */
  69struct icm {
  70	struct mutex request_lock;
  71	struct delayed_work rescan_work;
  72	struct pci_dev *upstream_port;
  73	size_t max_boot_acl;
  74	int vnd_cap;
  75	bool safe_mode;
 
 
 
 
 
 
  76	bool (*is_supported)(struct tb *tb);
 
  77	int (*get_mode)(struct tb *tb);
  78	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
 
  79	int (*driver_ready)(struct tb *tb,
  80			    enum tb_security_level *security_level,
  81			    size_t *nboot_acl);
 
  82	void (*device_connected)(struct tb *tb,
  83				 const struct icm_pkg_header *hdr);
  84	void (*device_disconnected)(struct tb *tb,
  85				    const struct icm_pkg_header *hdr);
  86	void (*xdomain_connected)(struct tb *tb,
  87				  const struct icm_pkg_header *hdr);
  88	void (*xdomain_disconnected)(struct tb *tb,
  89				     const struct icm_pkg_header *hdr);
 
  90};
  91
  92struct icm_notification {
  93	struct work_struct work;
  94	struct icm_pkg_header *pkg;
  95	struct tb *tb;
  96};
  97
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  98static inline struct tb *icm_to_tb(struct icm *icm)
  99{
 100	return ((void *)icm - sizeof(struct tb));
 101}
 102
 103static inline u8 phy_port_from_route(u64 route, u8 depth)
 104{
 105	u8 link;
 106
 107	link = depth ? route >> ((depth - 1) * 8) : route;
 108	return tb_phy_port_from_link(link);
 109}
 110
 111static inline u8 dual_link_from_link(u8 link)
 112{
 113	return link ? ((link - 1) ^ 0x01) + 1 : 0;
 114}
 115
 116static inline u64 get_route(u32 route_hi, u32 route_lo)
 117{
 118	return (u64)route_hi << 32 | route_lo;
 119}
 120
 121static inline u64 get_parent_route(u64 route)
 122{
 123	int depth = tb_route_length(route);
 124	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
 125}
 126
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 127static bool icm_match(const struct tb_cfg_request *req,
 128		      const struct ctl_pkg *pkg)
 129{
 130	const struct icm_pkg_header *res_hdr = pkg->buffer;
 131	const struct icm_pkg_header *req_hdr = req->request;
 132
 133	if (pkg->frame.eof != req->response_type)
 134		return false;
 135	if (res_hdr->code != req_hdr->code)
 136		return false;
 137
 138	return true;
 139}
 140
 141static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
 142{
 143	const struct icm_pkg_header *hdr = pkg->buffer;
 144
 145	if (hdr->packet_id < req->npackets) {
 146		size_t offset = hdr->packet_id * req->response_size;
 147
 148		memcpy(req->response + offset, pkg->buffer, req->response_size);
 149	}
 150
 151	return hdr->packet_id == hdr->total_packets - 1;
 152}
 153
 154static int icm_request(struct tb *tb, const void *request, size_t request_size,
 155		       void *response, size_t response_size, size_t npackets,
 156		       unsigned int timeout_msec)
 157{
 158	struct icm *icm = tb_priv(tb);
 159	int retries = 3;
 160
 161	do {
 162		struct tb_cfg_request *req;
 163		struct tb_cfg_result res;
 164
 165		req = tb_cfg_request_alloc();
 166		if (!req)
 167			return -ENOMEM;
 168
 169		req->match = icm_match;
 170		req->copy = icm_copy;
 171		req->request = request;
 172		req->request_size = request_size;
 173		req->request_type = TB_CFG_PKG_ICM_CMD;
 174		req->response = response;
 175		req->npackets = npackets;
 176		req->response_size = response_size;
 177		req->response_type = TB_CFG_PKG_ICM_RESP;
 178
 179		mutex_lock(&icm->request_lock);
 180		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
 181		mutex_unlock(&icm->request_lock);
 182
 183		tb_cfg_request_put(req);
 184
 185		if (res.err != -ETIMEDOUT)
 186			return res.err == 1 ? -EIO : res.err;
 187
 188		usleep_range(20, 50);
 189	} while (retries--);
 190
 191	return -ETIMEDOUT;
 192}
 193
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 194static bool icm_fr_is_supported(struct tb *tb)
 195{
 196	return !x86_apple_machine;
 197}
 198
 199static inline int icm_fr_get_switch_index(u32 port)
 200{
 201	int index;
 202
 203	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
 204		return 0;
 205
 206	index = port >> ICM_PORT_INDEX_SHIFT;
 207	return index != 0xff ? index : 0;
 208}
 209
 210static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
 211{
 212	struct icm_fr_pkg_get_topology_response *switches, *sw;
 213	struct icm_fr_pkg_get_topology request = {
 214		.hdr = { .code = ICM_GET_TOPOLOGY },
 215	};
 216	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
 217	int ret, index;
 218	u8 i;
 219
 220	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
 221	if (!switches)
 222		return -ENOMEM;
 223
 224	ret = icm_request(tb, &request, sizeof(request), switches,
 225			  sizeof(*switches), npackets, ICM_TIMEOUT);
 226	if (ret)
 227		goto err_free;
 228
 229	sw = &switches[0];
 230	index = icm_fr_get_switch_index(sw->ports[link]);
 231	if (!index) {
 232		ret = -ENODEV;
 233		goto err_free;
 234	}
 235
 236	sw = &switches[index];
 237	for (i = 1; i < depth; i++) {
 238		unsigned int j;
 239
 240		if (!(sw->first_data & ICM_SWITCH_USED)) {
 241			ret = -ENODEV;
 242			goto err_free;
 243		}
 244
 245		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
 246			index = icm_fr_get_switch_index(sw->ports[j]);
 247			if (index > sw->switch_index) {
 248				sw = &switches[index];
 249				break;
 250			}
 251		}
 252	}
 253
 254	*route = get_route(sw->route_hi, sw->route_lo);
 255
 256err_free:
 257	kfree(switches);
 258	return ret;
 259}
 260
 
 
 
 
 
 261static int
 262icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 263		    size_t *nboot_acl)
 264{
 265	struct icm_fr_pkg_driver_ready_response reply;
 266	struct icm_pkg_driver_ready request = {
 267		.hdr.code = ICM_DRIVER_READY,
 268	};
 269	int ret;
 270
 271	memset(&reply, 0, sizeof(reply));
 272	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 273			  1, ICM_TIMEOUT);
 274	if (ret)
 275		return ret;
 276
 277	if (security_level)
 278		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
 279
 280	return 0;
 281}
 282
 283static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
 284{
 285	struct icm_fr_pkg_approve_device request;
 286	struct icm_fr_pkg_approve_device reply;
 287	int ret;
 288
 289	memset(&request, 0, sizeof(request));
 290	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
 291	request.hdr.code = ICM_APPROVE_DEVICE;
 292	request.connection_id = sw->connection_id;
 293	request.connection_key = sw->connection_key;
 294
 295	memset(&reply, 0, sizeof(reply));
 296	/* Use larger timeout as establishing tunnels can take some time */
 297	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 298			  1, ICM_APPROVE_TIMEOUT);
 299	if (ret)
 300		return ret;
 301
 302	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
 303		tb_warn(tb, "PCIe tunnel creation failed\n");
 304		return -EIO;
 305	}
 306
 307	return 0;
 308}
 309
 310static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
 311{
 312	struct icm_fr_pkg_add_device_key request;
 313	struct icm_fr_pkg_add_device_key_response reply;
 314	int ret;
 315
 316	memset(&request, 0, sizeof(request));
 317	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
 318	request.hdr.code = ICM_ADD_DEVICE_KEY;
 319	request.connection_id = sw->connection_id;
 320	request.connection_key = sw->connection_key;
 321	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
 322
 323	memset(&reply, 0, sizeof(reply));
 324	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 325			  1, ICM_TIMEOUT);
 326	if (ret)
 327		return ret;
 328
 329	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
 330		tb_warn(tb, "Adding key to switch failed\n");
 331		return -EIO;
 332	}
 333
 334	return 0;
 335}
 336
 337static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 338				       const u8 *challenge, u8 *response)
 339{
 340	struct icm_fr_pkg_challenge_device request;
 341	struct icm_fr_pkg_challenge_device_response reply;
 342	int ret;
 343
 344	memset(&request, 0, sizeof(request));
 345	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
 346	request.hdr.code = ICM_CHALLENGE_DEVICE;
 347	request.connection_id = sw->connection_id;
 348	request.connection_key = sw->connection_key;
 349	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
 350
 351	memset(&reply, 0, sizeof(reply));
 352	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 353			  1, ICM_TIMEOUT);
 354	if (ret)
 355		return ret;
 356
 357	if (reply.hdr.flags & ICM_FLAGS_ERROR)
 358		return -EKEYREJECTED;
 359	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
 360		return -ENOKEY;
 361
 362	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
 363
 364	return 0;
 365}
 366
 367static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 
 
 368{
 369	struct icm_fr_pkg_approve_xdomain_response reply;
 370	struct icm_fr_pkg_approve_xdomain request;
 371	int ret;
 372
 373	memset(&request, 0, sizeof(request));
 374	request.hdr.code = ICM_APPROVE_XDOMAIN;
 375	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
 376	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
 377
 378	request.transmit_path = xd->transmit_path;
 379	request.transmit_ring = xd->transmit_ring;
 380	request.receive_path = xd->receive_path;
 381	request.receive_ring = xd->receive_ring;
 382
 383	memset(&reply, 0, sizeof(reply));
 384	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 385			  1, ICM_TIMEOUT);
 386	if (ret)
 387		return ret;
 388
 389	if (reply.hdr.flags & ICM_FLAGS_ERROR)
 390		return -EIO;
 391
 392	return 0;
 393}
 394
 395static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 
 
 396{
 397	u8 phy_port;
 398	u8 cmd;
 399
 400	phy_port = tb_phy_port_from_link(xd->link);
 401	if (phy_port == 0)
 402		cmd = NHI_MAILBOX_DISCONNECT_PA;
 403	else
 404		cmd = NHI_MAILBOX_DISCONNECT_PB;
 405
 406	nhi_mailbox_cmd(tb->nhi, cmd, 1);
 407	usleep_range(10, 50);
 408	nhi_mailbox_cmd(tb->nhi, cmd, 2);
 409	return 0;
 410}
 411
 412static void add_switch(struct tb_switch *parent_sw, u64 route,
 413		       const uuid_t *uuid, u8 connection_id, u8 connection_key,
 414		       u8 link, u8 depth, enum tb_security_level security_level,
 415		       bool authorized, bool boot)
 416{
 
 417	struct tb_switch *sw;
 418
 419	sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
 420	if (!sw)
 421		return;
 
 
 422
 423	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
 424	sw->connection_id = connection_id;
 425	sw->connection_key = connection_key;
 426	sw->link = link;
 427	sw->depth = depth;
 428	sw->authorized = authorized;
 429	sw->security_level = security_level;
 430	sw->boot = boot;
 
 
 
 
 
 
 431
 432	/* Link the two switches now */
 433	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
 434	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
 435
 436	if (tb_switch_add(sw)) {
 
 437		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
 438		tb_switch_put(sw);
 439		return;
 440	}
 441}
 442
 443static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
 444			  u64 route, u8 connection_id, u8 connection_key,
 445			  u8 link, u8 depth, bool boot)
 446{
 
 
 447	/* Disconnect from parent */
 448	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
 449	/* Re-connect via updated port*/
 450	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
 451
 452	/* Update with the new addressing information */
 453	sw->config.route_hi = upper_32_bits(route);
 454	sw->config.route_lo = lower_32_bits(route);
 455	sw->connection_id = connection_id;
 456	sw->connection_key = connection_key;
 457	sw->link = link;
 458	sw->depth = depth;
 459	sw->boot = boot;
 460
 461	/* This switch still exists */
 462	sw->is_unplugged = false;
 
 
 
 463}
 464
 465static void remove_switch(struct tb_switch *sw)
 466{
 467	struct tb_switch *parent_sw;
 468
 469	parent_sw = tb_to_switch(sw->dev.parent);
 470	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
 471	tb_switch_remove(sw);
 472}
 473
 474static void add_xdomain(struct tb_switch *sw, u64 route,
 475			const uuid_t *local_uuid, const uuid_t *remote_uuid,
 476			u8 link, u8 depth)
 477{
 478	struct tb_xdomain *xd;
 479
 
 
 480	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
 481	if (!xd)
 482		return;
 483
 484	xd->link = link;
 485	xd->depth = depth;
 486
 487	tb_port_at(route, sw)->xdomain = xd;
 488
 489	tb_xdomain_add(xd);
 
 
 
 
 490}
 491
 492static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
 493{
 494	xd->link = link;
 495	xd->route = route;
 496	xd->is_unplugged = false;
 497}
 498
 499static void remove_xdomain(struct tb_xdomain *xd)
 500{
 501	struct tb_switch *sw;
 502
 503	sw = tb_to_switch(xd->dev.parent);
 504	tb_port_at(xd->route, sw)->xdomain = NULL;
 505	tb_xdomain_remove(xd);
 506}
 507
 508static void
 509icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 510{
 511	const struct icm_fr_event_device_connected *pkg =
 512		(const struct icm_fr_event_device_connected *)hdr;
 513	enum tb_security_level security_level;
 514	struct tb_switch *sw, *parent_sw;
 
 515	struct icm *icm = tb_priv(tb);
 516	bool authorized = false;
 517	struct tb_xdomain *xd;
 518	u8 link, depth;
 519	bool boot;
 520	u64 route;
 521	int ret;
 522
 
 
 523	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
 524	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 525		ICM_LINK_INFO_DEPTH_SHIFT;
 526	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
 527	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
 528			 ICM_FLAGS_SLEVEL_SHIFT;
 529	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
 
 
 530
 531	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
 532		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
 533			link, depth);
 534		return;
 535	}
 536
 537	ret = icm->get_route(tb, link, depth, &route);
 538	if (ret) {
 539		tb_err(tb, "failed to find route string for switch at %u.%u\n",
 540		       link, depth);
 541		return;
 542	}
 543
 544	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
 545	if (sw) {
 546		u8 phy_port, sw_phy_port;
 547
 548		parent_sw = tb_to_switch(sw->dev.parent);
 549		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
 550		phy_port = phy_port_from_route(route, depth);
 551
 552		/*
 553		 * On resume ICM will send us connected events for the
 554		 * devices that still are present. However, that
 555		 * information might have changed for example by the
 556		 * fact that a switch on a dual-link connection might
 557		 * have been enumerated using the other link now. Make
 558		 * sure our book keeping matches that.
 559		 */
 560		if (sw->depth == depth && sw_phy_port == phy_port &&
 561		    !!sw->authorized == authorized) {
 562			update_switch(parent_sw, sw, route, pkg->connection_id,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 563				      pkg->connection_key, link, depth, boot);
 564			tb_switch_put(sw);
 565			return;
 566		}
 567
 568		/*
 569		 * User connected the same switch to another physical
 570		 * port or to another part of the topology. Remove the
 571		 * existing switch now before adding the new one.
 572		 */
 573		remove_switch(sw);
 574		tb_switch_put(sw);
 575	}
 576
 577	/*
 578	 * If the switch was not found by UUID, look for a switch on
 579	 * same physical port (taking possible link aggregation into
 580	 * account) and depth. If we found one it is definitely a stale
 581	 * one so remove it first.
 582	 */
 583	sw = tb_switch_find_by_link_depth(tb, link, depth);
 584	if (!sw) {
 585		u8 dual_link;
 586
 587		dual_link = dual_link_from_link(link);
 588		if (dual_link)
 589			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
 590	}
 591	if (sw) {
 592		remove_switch(sw);
 593		tb_switch_put(sw);
 594	}
 595
 596	/* Remove existing XDomain connection if found */
 597	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
 598	if (xd) {
 599		remove_xdomain(xd);
 600		tb_xdomain_put(xd);
 601	}
 602
 603	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
 604	if (!parent_sw) {
 605		tb_err(tb, "failed to find parent switch for %u.%u\n",
 606		       link, depth);
 607		return;
 608	}
 609
 610	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
 611		   pkg->connection_key, link, depth, security_level,
 612		   authorized, boot);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 613
 614	tb_switch_put(parent_sw);
 615}
 616
 617static void
 618icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 619{
 620	const struct icm_fr_event_device_disconnected *pkg =
 621		(const struct icm_fr_event_device_disconnected *)hdr;
 622	struct tb_switch *sw;
 623	u8 link, depth;
 624
 625	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
 626	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 627		ICM_LINK_INFO_DEPTH_SHIFT;
 628
 629	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
 630		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
 631		return;
 632	}
 633
 634	sw = tb_switch_find_by_link_depth(tb, link, depth);
 635	if (!sw) {
 636		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
 637			depth);
 638		return;
 639	}
 640
 
 
 641	remove_switch(sw);
 
 
 
 
 642	tb_switch_put(sw);
 643}
 644
 645static void
 646icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 647{
 648	const struct icm_fr_event_xdomain_connected *pkg =
 649		(const struct icm_fr_event_xdomain_connected *)hdr;
 650	struct tb_xdomain *xd;
 651	struct tb_switch *sw;
 652	u8 link, depth;
 653	bool approved;
 654	u64 route;
 655
 656	/*
 657	 * After NVM upgrade adding root switch device fails because we
 658	 * initiated reset. During that time ICM might still send
 659	 * XDomain connected message which we ignore here.
 660	 */
 661	if (!tb->root_switch)
 662		return;
 663
 664	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
 665	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
 666		ICM_LINK_INFO_DEPTH_SHIFT;
 667	approved = pkg->link_info & ICM_LINK_INFO_APPROVED;
 668
 669	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
 670		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
 671		return;
 672	}
 673
 674	route = get_route(pkg->local_route_hi, pkg->local_route_lo);
 675
 676	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
 677	if (xd) {
 678		u8 xd_phy_port, phy_port;
 679
 680		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
 681		phy_port = phy_port_from_route(route, depth);
 682
 683		if (xd->depth == depth && xd_phy_port == phy_port) {
 684			update_xdomain(xd, route, link);
 685			tb_xdomain_put(xd);
 686			return;
 687		}
 688
 689		/*
 690		 * If we find an existing XDomain connection remove it
 691		 * now. We need to go through login handshake and
 692		 * everything anyway to be able to re-establish the
 693		 * connection.
 694		 */
 695		remove_xdomain(xd);
 696		tb_xdomain_put(xd);
 697	}
 698
 699	/*
 700	 * Look if there already exists an XDomain in the same place
 701	 * than the new one and in that case remove it because it is
 702	 * most likely another host that got disconnected.
 703	 */
 704	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
 705	if (!xd) {
 706		u8 dual_link;
 707
 708		dual_link = dual_link_from_link(link);
 709		if (dual_link)
 710			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
 711							   depth);
 712	}
 713	if (xd) {
 714		remove_xdomain(xd);
 715		tb_xdomain_put(xd);
 716	}
 717
 718	/*
 719	 * If the user disconnected a switch during suspend and
 720	 * connected another host to the same port, remove the switch
 721	 * first.
 722	 */
 723	sw = get_switch_at_route(tb->root_switch, route);
 724	if (sw)
 725		remove_switch(sw);
 
 
 726
 727	sw = tb_switch_find_by_link_depth(tb, link, depth);
 728	if (!sw) {
 729		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
 730			depth);
 731		return;
 732	}
 733
 734	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
 735		    depth);
 736	tb_switch_put(sw);
 737}
 738
 739static void
 740icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 741{
 742	const struct icm_fr_event_xdomain_disconnected *pkg =
 743		(const struct icm_fr_event_xdomain_disconnected *)hdr;
 744	struct tb_xdomain *xd;
 745
 746	/*
 747	 * If the connection is through one or multiple devices, the
 748	 * XDomain device is removed along with them so it is fine if we
 749	 * cannot find it here.
 750	 */
 751	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
 752	if (xd) {
 753		remove_xdomain(xd);
 754		tb_xdomain_put(xd);
 755	}
 756}
 757
 
 
 
 
 
 758static int
 759icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 760		    size_t *nboot_acl)
 761{
 762	struct icm_tr_pkg_driver_ready_response reply;
 763	struct icm_pkg_driver_ready request = {
 764		.hdr.code = ICM_DRIVER_READY,
 765	};
 766	int ret;
 767
 768	memset(&reply, 0, sizeof(reply));
 769	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 770			  1, 20000);
 771	if (ret)
 772		return ret;
 773
 774	if (security_level)
 775		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
 
 
 
 776	if (nboot_acl)
 777		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
 778				ICM_TR_INFO_BOOT_ACL_SHIFT;
 
 
 
 779	return 0;
 780}
 781
 782static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
 783{
 784	struct icm_tr_pkg_approve_device request;
 785	struct icm_tr_pkg_approve_device reply;
 786	int ret;
 787
 788	memset(&request, 0, sizeof(request));
 789	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
 790	request.hdr.code = ICM_APPROVE_DEVICE;
 791	request.route_lo = sw->config.route_lo;
 792	request.route_hi = sw->config.route_hi;
 793	request.connection_id = sw->connection_id;
 794
 795	memset(&reply, 0, sizeof(reply));
 796	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 797			  1, ICM_APPROVE_TIMEOUT);
 798	if (ret)
 799		return ret;
 800
 801	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
 802		tb_warn(tb, "PCIe tunnel creation failed\n");
 803		return -EIO;
 804	}
 805
 806	return 0;
 807}
 808
 809static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
 810{
 811	struct icm_tr_pkg_add_device_key_response reply;
 812	struct icm_tr_pkg_add_device_key request;
 813	int ret;
 814
 815	memset(&request, 0, sizeof(request));
 816	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
 817	request.hdr.code = ICM_ADD_DEVICE_KEY;
 818	request.route_lo = sw->config.route_lo;
 819	request.route_hi = sw->config.route_hi;
 820	request.connection_id = sw->connection_id;
 821	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
 822
 823	memset(&reply, 0, sizeof(reply));
 824	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 825			  1, ICM_TIMEOUT);
 826	if (ret)
 827		return ret;
 828
 829	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
 830		tb_warn(tb, "Adding key to switch failed\n");
 831		return -EIO;
 832	}
 833
 834	return 0;
 835}
 836
 837static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 838				       const u8 *challenge, u8 *response)
 839{
 840	struct icm_tr_pkg_challenge_device_response reply;
 841	struct icm_tr_pkg_challenge_device request;
 842	int ret;
 843
 844	memset(&request, 0, sizeof(request));
 845	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
 846	request.hdr.code = ICM_CHALLENGE_DEVICE;
 847	request.route_lo = sw->config.route_lo;
 848	request.route_hi = sw->config.route_hi;
 849	request.connection_id = sw->connection_id;
 850	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
 851
 852	memset(&reply, 0, sizeof(reply));
 853	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 854			  1, ICM_TIMEOUT);
 855	if (ret)
 856		return ret;
 857
 858	if (reply.hdr.flags & ICM_FLAGS_ERROR)
 859		return -EKEYREJECTED;
 860	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
 861		return -ENOKEY;
 862
 863	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
 864
 865	return 0;
 866}
 867
 868static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 
 
 869{
 870	struct icm_tr_pkg_approve_xdomain_response reply;
 871	struct icm_tr_pkg_approve_xdomain request;
 872	int ret;
 873
 874	memset(&request, 0, sizeof(request));
 875	request.hdr.code = ICM_APPROVE_XDOMAIN;
 876	request.route_hi = upper_32_bits(xd->route);
 877	request.route_lo = lower_32_bits(xd->route);
 878	request.transmit_path = xd->transmit_path;
 879	request.transmit_ring = xd->transmit_ring;
 880	request.receive_path = xd->receive_path;
 881	request.receive_ring = xd->receive_ring;
 882	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
 883
 884	memset(&reply, 0, sizeof(reply));
 885	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 886			  1, ICM_TIMEOUT);
 887	if (ret)
 888		return ret;
 889
 890	if (reply.hdr.flags & ICM_FLAGS_ERROR)
 891		return -EIO;
 892
 893	return 0;
 894}
 895
 896static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
 897				    int stage)
 898{
 899	struct icm_tr_pkg_disconnect_xdomain_response reply;
 900	struct icm_tr_pkg_disconnect_xdomain request;
 901	int ret;
 902
 903	memset(&request, 0, sizeof(request));
 904	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
 905	request.stage = stage;
 906	request.route_hi = upper_32_bits(xd->route);
 907	request.route_lo = lower_32_bits(xd->route);
 908	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
 909
 910	memset(&reply, 0, sizeof(reply));
 911	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
 912			  1, ICM_TIMEOUT);
 913	if (ret)
 914		return ret;
 915
 916	if (reply.hdr.flags & ICM_FLAGS_ERROR)
 917		return -EIO;
 918
 919	return 0;
 920}
 921
 922static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 
 
 923{
 924	int ret;
 925
 926	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
 927	if (ret)
 928		return ret;
 929
 930	usleep_range(10, 50);
 931	return icm_tr_xdomain_tear_down(tb, xd, 2);
 932}
 933
 934static void
 935icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
 
 936{
 937	const struct icm_tr_event_device_connected *pkg =
 938		(const struct icm_tr_event_device_connected *)hdr;
 
 939	enum tb_security_level security_level;
 940	struct tb_switch *sw, *parent_sw;
 941	struct tb_xdomain *xd;
 942	bool authorized, boot;
 943	u64 route;
 944
 
 
 945	/*
 946	 * Currently we don't use the QoS information coming with the
 947	 * device connected message so simply just ignore that extra
 948	 * packet for now.
 949	 */
 950	if (pkg->hdr.packet_id)
 951		return;
 952
 953	/*
 954	 * After NVM upgrade adding root switch device fails because we
 955	 * initiated reset. During that time ICM might still send device
 956	 * connected message which we ignore here.
 957	 */
 958	if (!tb->root_switch)
 959		return;
 960
 961	route = get_route(pkg->route_hi, pkg->route_lo);
 962	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
 963	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
 964			 ICM_FLAGS_SLEVEL_SHIFT;
 965	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
 
 
 966
 967	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
 968		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
 969			route);
 970		return;
 971	}
 972
 973	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
 974	if (sw) {
 975		/* Update the switch if it is still in the same place */
 976		if (tb_route(sw) == route && !!sw->authorized == authorized) {
 977			parent_sw = tb_to_switch(sw->dev.parent);
 978			update_switch(parent_sw, sw, route, pkg->connection_id,
 979				      0, 0, 0, boot);
 980			tb_switch_put(sw);
 981			return;
 982		}
 983
 984		remove_switch(sw);
 985		tb_switch_put(sw);
 986	}
 987
 988	/* Another switch with the same address */
 989	sw = tb_switch_find_by_route(tb, route);
 990	if (sw) {
 991		remove_switch(sw);
 992		tb_switch_put(sw);
 993	}
 994
 995	/* XDomain connection with the same address */
 996	xd = tb_xdomain_find_by_route(tb, route);
 997	if (xd) {
 998		remove_xdomain(xd);
 999		tb_xdomain_put(xd);
1000	}
1001
1002	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
1003	if (!parent_sw) {
1004		tb_err(tb, "failed to find parent switch for %llx\n", route);
1005		return;
1006	}
1007
1008	add_switch(parent_sw, route, &pkg->ep_uuid, pkg->connection_id,
1009		   0, 0, 0, security_level, authorized, boot);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1010
1011	tb_switch_put(parent_sw);
1012}
1013
1014static void
 
 
 
 
 
 
1015icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
1016{
1017	const struct icm_tr_event_device_disconnected *pkg =
1018		(const struct icm_tr_event_device_disconnected *)hdr;
1019	struct tb_switch *sw;
1020	u64 route;
1021
1022	route = get_route(pkg->route_hi, pkg->route_lo);
1023
1024	sw = tb_switch_find_by_route(tb, route);
1025	if (!sw) {
1026		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
1027		return;
1028	}
 
1029
1030	remove_switch(sw);
 
 
 
 
1031	tb_switch_put(sw);
1032}
1033
1034static void
1035icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1036{
1037	const struct icm_tr_event_xdomain_connected *pkg =
1038		(const struct icm_tr_event_xdomain_connected *)hdr;
1039	struct tb_xdomain *xd;
1040	struct tb_switch *sw;
1041	u64 route;
1042
1043	if (!tb->root_switch)
1044		return;
1045
1046	route = get_route(pkg->local_route_hi, pkg->local_route_lo);
1047
1048	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
1049	if (xd) {
1050		if (xd->route == route) {
1051			update_xdomain(xd, route, 0);
1052			tb_xdomain_put(xd);
1053			return;
1054		}
1055
1056		remove_xdomain(xd);
1057		tb_xdomain_put(xd);
1058	}
1059
1060	/* An existing xdomain with the same address */
1061	xd = tb_xdomain_find_by_route(tb, route);
1062	if (xd) {
1063		remove_xdomain(xd);
1064		tb_xdomain_put(xd);
1065	}
1066
1067	/*
1068	 * If the user disconnected a switch during suspend and
1069	 * connected another host to the same port, remove the switch
1070	 * first.
1071	 */
1072	sw = get_switch_at_route(tb->root_switch, route);
1073	if (sw)
1074		remove_switch(sw);
 
 
1075
1076	sw = tb_switch_find_by_route(tb, get_parent_route(route));
1077	if (!sw) {
1078		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
1079		return;
1080	}
1081
1082	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
1083	tb_switch_put(sw);
1084}
1085
1086static void
1087icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
1088{
1089	const struct icm_tr_event_xdomain_disconnected *pkg =
1090		(const struct icm_tr_event_xdomain_disconnected *)hdr;
1091	struct tb_xdomain *xd;
1092	u64 route;
1093
1094	route = get_route(pkg->route_hi, pkg->route_lo);
1095
1096	xd = tb_xdomain_find_by_route(tb, route);
1097	if (xd) {
1098		remove_xdomain(xd);
1099		tb_xdomain_put(xd);
1100	}
1101}
1102
1103static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
1104{
1105	struct pci_dev *parent;
1106
1107	parent = pci_upstream_bridge(pdev);
1108	while (parent) {
1109		if (!pci_is_pcie(parent))
1110			return NULL;
1111		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
1112			break;
1113		parent = pci_upstream_bridge(parent);
1114	}
1115
1116	if (!parent)
1117		return NULL;
1118
1119	switch (parent->device) {
1120	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1121	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1122	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1123	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1124	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
 
 
1125		return parent;
1126	}
1127
1128	return NULL;
1129}
1130
1131static bool icm_ar_is_supported(struct tb *tb)
1132{
1133	struct pci_dev *upstream_port;
1134	struct icm *icm = tb_priv(tb);
1135
1136	/*
1137	 * Starting from Alpine Ridge we can use ICM on Apple machines
1138	 * as well. We just need to reset and re-enable it first.
 
1139	 */
1140	if (!x86_apple_machine)
1141		return true;
 
 
1142
1143	/*
1144	 * Find the upstream PCIe port in case we need to do reset
1145	 * through its vendor specific registers.
1146	 */
1147	upstream_port = get_upstream_port(tb->nhi->pdev);
1148	if (upstream_port) {
1149		int cap;
1150
1151		cap = pci_find_ext_capability(upstream_port,
1152					      PCI_EXT_CAP_ID_VNDR);
1153		if (cap > 0) {
1154			icm->upstream_port = upstream_port;
1155			icm->vnd_cap = cap;
1156
1157			return true;
1158		}
1159	}
1160
1161	return false;
1162}
1163
 
 
 
 
 
1164static int icm_ar_get_mode(struct tb *tb)
1165{
1166	struct tb_nhi *nhi = tb->nhi;
1167	int retries = 60;
1168	u32 val;
1169
1170	do {
1171		val = ioread32(nhi->iobase + REG_FW_STS);
1172		if (val & REG_FW_STS_NVM_AUTH_DONE)
1173			break;
1174		msleep(50);
1175	} while (--retries);
1176
1177	if (!retries) {
1178		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
1179		return -ENODEV;
1180	}
1181
1182	return nhi_mailbox_mode(nhi);
1183}
1184
1185static int
1186icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1187		    size_t *nboot_acl)
1188{
1189	struct icm_ar_pkg_driver_ready_response reply;
1190	struct icm_pkg_driver_ready request = {
1191		.hdr.code = ICM_DRIVER_READY,
1192	};
1193	int ret;
1194
1195	memset(&reply, 0, sizeof(reply));
1196	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1197			  1, ICM_TIMEOUT);
1198	if (ret)
1199		return ret;
1200
1201	if (security_level)
1202		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
1203	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
1204		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
1205				ICM_AR_INFO_BOOT_ACL_SHIFT;
 
 
 
1206	return 0;
1207}
1208
1209static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
1210{
1211	struct icm_ar_pkg_get_route_response reply;
1212	struct icm_ar_pkg_get_route request = {
1213		.hdr = { .code = ICM_GET_ROUTE },
1214		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
1215	};
1216	int ret;
1217
1218	memset(&reply, 0, sizeof(reply));
1219	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1220			  1, ICM_TIMEOUT);
1221	if (ret)
1222		return ret;
1223
1224	if (reply.hdr.flags & ICM_FLAGS_ERROR)
1225		return -EIO;
1226
1227	*route = get_route(reply.route_hi, reply.route_lo);
1228	return 0;
1229}
1230
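/*
 * Read the preboot ACL from the firmware. Each entry carries only the
 * two low UUID DWs: an all-ones entry denotes an empty slot and is
 * reported back as the null UUID, and for any other non-zero entry
 * the two upper DWs are filled in as all ones (uuid[2] = uuid[3] =
 * 0xffffffff).
 */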
1231static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
1232{
1233	struct icm_ar_pkg_preboot_acl_response reply;
1234	struct icm_ar_pkg_preboot_acl request = {
1235		.hdr = { .code = ICM_PREBOOT_ACL },
1236	};
1237	int ret, i;
1238
1239	memset(&reply, 0, sizeof(reply));
1240	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1241			  1, ICM_TIMEOUT);
1242	if (ret)
1243		return ret;
1244
1245	if (reply.hdr.flags & ICM_FLAGS_ERROR)
1246		return -EIO;
1247
1248	for (i = 0; i < nuuids; i++) {
1249		u32 *uuid = (u32 *)&uuids[i];
1250
1251		uuid[0] = reply.acl[i].uuid_lo;
1252		uuid[1] = reply.acl[i].uuid_hi;
1253
1254		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
1255			/* Map empty entries to null UUID */
1256			uuid[0] = 0;
1257			uuid[1] = 0;
1258		} else if (uuid[0] != 0 || uuid[1] != 0) {
1259			/* Upper two DWs are always ones */
1260			uuid[2] = 0xffffffff;
1261			uuid[3] = 0xffffffff;
1262		}
1263	}
1264
1265	return ret;
1266}
1267
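/*
 * Write the preboot ACL, doing the inverse mapping of
 * icm_ar_get_boot_acl(): the null UUID becomes an all-ones (empty)
 * entry, and any real UUID must already have its two upper DWs set to
 * all ones or -EINVAL is returned.
 */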
1268static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
1269			       size_t nuuids)
1270{
1271	struct icm_ar_pkg_preboot_acl_response reply;
1272	struct icm_ar_pkg_preboot_acl request = {
1273		.hdr = {
1274			.code = ICM_PREBOOT_ACL,
1275			.flags = ICM_FLAGS_WRITE,
1276		},
1277	};
1278	int ret, i;
1279
1280	for (i = 0; i < nuuids; i++) {
1281		const u32 *uuid = (const u32 *)&uuids[i];
1282
1283		if (uuid_is_null(&uuids[i])) {
1284			/*
1285			 * Map null UUID to the empty (all ones) entries
1286			 * for ICM.
1287			 */
1288			request.acl[i].uuid_lo = 0xffffffff;
1289			request.acl[i].uuid_hi = 0xffffffff;
1290		} else {
1291			/* Two high DWs need to be set to all ones */
1292			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
1293				return -EINVAL;
1294
1295			request.acl[i].uuid_lo = uuid[0];
1296			request.acl[i].uuid_hi = uuid[1];
1297		}
1298	}
1299
1300	memset(&reply, 0, sizeof(reply));
1301	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1302			  1, ICM_TIMEOUT);
1303	if (ret)
1304		return ret;
1305
1306	if (reply.hdr.flags & ICM_FLAGS_ERROR)
1307		return -EIO;
1308
1309	return 0;
1310}
1311
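/*
 * Work item that dispatches a copied ICM event packet to the matching
 * per-generation handler under tb->lock and then frees the
 * notification.
 */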
1312static void icm_handle_notification(struct work_struct *work)
1313{
1314	struct icm_notification *n = container_of(work, typeof(*n), work);
1315	struct tb *tb = n->tb;
1316	struct icm *icm = tb_priv(tb);
1317
1318	mutex_lock(&tb->lock);
1319
1320	switch (n->pkg->code) {
1321	case ICM_EVENT_DEVICE_CONNECTED:
1322		icm->device_connected(tb, n->pkg);
1323		break;
1324	case ICM_EVENT_DEVICE_DISCONNECTED:
1325		icm->device_disconnected(tb, n->pkg);
1326		break;
1327	case ICM_EVENT_XDOMAIN_CONNECTED:
1328		icm->xdomain_connected(tb, n->pkg);
1329		break;
1330	case ICM_EVENT_XDOMAIN_DISCONNECTED:
1331		icm->xdomain_disconnected(tb, n->pkg);
1332		break;
1333	}
1334
1335	mutex_unlock(&tb->lock);
1336
1337	kfree(n->pkg);
1338	kfree(n);
1339}
1340
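/*
 * Event callback: duplicate the incoming packet and defer handling to
 * the domain workqueue since icm_handle_notification() needs to take
 * tb->lock.
 */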
1341static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
1342			     const void *buf, size_t size)
1343{
1344	struct icm_notification *n;
1345
1346	n = kmalloc(sizeof(*n), GFP_KERNEL);
1347	if (!n)
1348		return;
1349
1350	INIT_WORK(&n->work, icm_handle_notification);
1351	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	if (!n->pkg) {
		/* Drop the event if we are out of memory */
		kfree(n);
		return;
	}
1352	n->tb = tb;
1353
1354	queue_work(tb->wq, &n->work);
1355}
1356
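/*
 * Send the driver ready command and then poll (up to 50 times) until
 * the root switch config space can be read, which indicates the
 * firmware is fully operational.
 */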
1357static int
1358__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
1359		   size_t *nboot_acl)
1360{
1361	struct icm *icm = tb_priv(tb);
1362	unsigned int retries = 50;
1363	int ret;
1364
1365	ret = icm->driver_ready(tb, security_level, nboot_acl);
1366	if (ret) {
1367		tb_err(tb, "failed to send driver ready to ICM\n");
1368		return ret;
1369	}
1370
1371	/*
1372	 * Hold on here until the switch config space is accessible so
1373	 * that we can read root switch config successfully.
1374	 */
1375	do {
1376		struct tb_cfg_result res;
1377		u32 tmp;
1378
1379		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
1380				      0, 1, 100);
1381		if (!res.err)
1382			return 0;
1383
1384		msleep(50);
1385	} while (--retries);
1386
1387	tb_err(tb, "failed to read root switch config space, giving up\n");
1388	return -ETIMEDOUT;
1389}
1390
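/*
 * Poll the PCIE2CIO_CMD register until the hardware clears the START
 * bit or up to @timeout_msec. Returns -ETIMEDOUT if the TIMEOUT bit
 * gets set or the deadline passes.
 */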
1391static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
1392{
1393	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
1394	u32 cmd;
1395
1396	do {
1397		pci_read_config_dword(icm->upstream_port,
1398				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
1399		if (!(cmd & PCIE2CIO_CMD_START)) {
1400			if (cmd & PCIE2CIO_CMD_TIMEOUT)
1401				break;
1402			return 0;
1403		}
1404
1405		msleep(50);
1406	} while (time_before(jiffies, end));
1407
1408	return -ETIMEDOUT;
1409}
1410
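/*
 * Read one config space dword through the vendor specific PCIe2CIO
 * mailbox: encode index, port and config space into PCIE2CIO_CMD, set
 * START, wait for completion and fetch the result from
 * PCIE2CIO_RDDATA. pcie2cio_write() below mirrors the sequence using
 * PCIE2CIO_WRDATA.
 */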
1411static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
1412			 unsigned int port, unsigned int index, u32 *data)
1413{
1414	struct pci_dev *pdev = icm->upstream_port;
1415	int ret, vnd_cap = icm->vnd_cap;
1416	u32 cmd;
1417
1418	cmd = index;
1419	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
1420	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
1421	cmd |= PCIE2CIO_CMD_START;
1422	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
1423
1424	ret = pci2cio_wait_completion(icm, 5000);
1425	if (ret)
1426		return ret;
1427
1428	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
1429	return 0;
1430}
1431
1432static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
1433			  unsigned int port, unsigned int index, u32 data)
1434{
1435	struct pci_dev *pdev = icm->upstream_port;
1436	int vnd_cap = icm->vnd_cap;
1437	u32 cmd;
1438
1439	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
1440
1441	cmd = index;
1442	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
1443	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
1444	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
1445	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
1446
1447	return pci2cio_wait_completion(icm, 5000);
1448}
1449
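/*
 * Only possible when icm->upstream_port is set (Apple hardware where
 * the driver starts the ICM itself): park ARC to wait for a CIO
 * reset, enable the ICM CPU in REG_FW_STS and trigger the reset by
 * writing bit 9 of the vendor specific register 0x50 over PCIe2CIO.
 */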
1450static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
1451{
1452	struct icm *icm = tb_priv(tb);
1453	u32 val;
1454
1455	if (!icm->upstream_port)
1456		return -ENODEV;
1457
1458	/* Make ARC wait for the CIO reset event to happen */
1459	val = ioread32(nhi->iobase + REG_FW_STS);
1460	val |= REG_FW_STS_CIO_RESET_REQ;
1461	iowrite32(val, nhi->iobase + REG_FW_STS);
1462
1463	/* Re-start ARC */
1464	val = ioread32(nhi->iobase + REG_FW_STS);
1465	val |= REG_FW_STS_ICM_EN_INVERT;
1466	val |= REG_FW_STS_ICM_EN_CPU;
1467	iowrite32(val, nhi->iobase + REG_FW_STS);
1468
1469	/* Trigger CIO reset now */
1470	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
1471}
1472
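/*
 * Start the ICM firmware if it is not already running: run the reset
 * sequence and then poll REG_FW_STS for NVM_AUTH_DONE for up to
 * 10 x 300 ms before giving up.
 */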
1473static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
1474{
1475	unsigned int retries = 10;
1476	int ret;
1477	u32 val;
1478
1479	/* Check if the ICM firmware is already running */
1480	val = ioread32(nhi->iobase + REG_FW_STS);
1481	if (val & REG_FW_STS_ICM_EN)
1482		return 0;
1483
1484	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
1485
1486	ret = icm_firmware_reset(tb, nhi);
1487	if (ret)
1488		return ret;
1489
1490	/* Wait until the ICM firmware tells us it is up and running */
1491	do {
1492		/* Check that the ICM firmware is running */
1493		val = ioread32(nhi->iobase + REG_FW_STS);
1494		if (val & REG_FW_STS_NVM_AUTH_DONE)
1495			return 0;
1496
1497		msleep(300);
1498	} while (--retries);
1499
1500	return -ETIMEDOUT;
1501}
1502
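/*
 * If both lane adapters (null ports) of the given physical port are
 * up, briefly set their link disable bits to force the links down and
 * back up, presumably so that the freshly started ICM re-enumerates
 * whatever was already connected.
 */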
1503static int icm_reset_phy_port(struct tb *tb, int phy_port)
1504{
1505	struct icm *icm = tb_priv(tb);
1506	u32 state0, state1;
1507	int port0, port1;
1508	u32 val0, val1;
1509	int ret;
1510
1511	if (!icm->upstream_port)
1512		return 0;
1513
1514	if (phy_port) {
1515		port0 = 3;
1516		port1 = 4;
1517	} else {
1518		port0 = 1;
1519		port1 = 2;
1520	}
1521
1522	/*
1523	 * Read link status of both null ports belonging to a single
1524	 * physical port.
1525	 */
1526	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1527	if (ret)
1528		return ret;
1529	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1530	if (ret)
1531		return ret;
1532
1533	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
1534	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1535	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
1536	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
1537
1538	/* If they are both up we need to reset them now */
1539	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
1540		return 0;
1541
1542	val0 |= PHY_PORT_CS1_LINK_DISABLE;
1543	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1544	if (ret)
1545		return ret;
1546
1547	val1 |= PHY_PORT_CS1_LINK_DISABLE;
1548	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1549	if (ret)
1550		return ret;
1551
1552	/* Wait a bit and then re-enable both ports */
1553	usleep_range(10, 100);
1554
1555	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
1556	if (ret)
1557		return ret;
1558	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
1559	if (ret)
1560		return ret;
1561
1562	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
1563	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
1564	if (ret)
1565		return ret;
1566
1567	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
1568	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
1569}
1570
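/*
 * Bring the firmware to a usable state before the domain is started:
 * start it if needed, check the mode it is running in and kick any
 * already connected ports.
 */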
1571static int icm_firmware_init(struct tb *tb)
1572{
1573	struct icm *icm = tb_priv(tb);
1574	struct tb_nhi *nhi = tb->nhi;
1575	int ret;
1576
1577	ret = icm_firmware_start(tb, nhi);
1578	if (ret) {
1579		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
1580		return ret;
1581	}
1582
1583	if (icm->get_mode) {
1584		ret = icm->get_mode(tb);
1585
1586		switch (ret) {
1587		case NHI_FW_SAFE_MODE:
1588			icm->safe_mode = true;
1589			break;
1590
1591		case NHI_FW_CM_MODE:
1592			/* Ask ICM to accept all Thunderbolt devices */
1593			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
1594			break;
1595
1596		default:
1597			if (ret < 0)
1598				return ret;
1599
1600			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
1601			return -ENODEV;
1602		}
1603	}
1604
1605	/*
1606	 * Reset both physical ports if there is anything connected to
1607	 * them already.
1608	 */
1609	ret = icm_reset_phy_port(tb, 0);
1610	if (ret)
1611		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
1612	ret = icm_reset_phy_port(tb, 1);
1613	if (ret)
1614		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
1615
1616	return 0;
1617}
1618
1619static int icm_driver_ready(struct tb *tb)
1620{
1621	struct icm *icm = tb_priv(tb);
1622	int ret;
1623
1624	ret = icm_firmware_init(tb);
1625	if (ret)
1626		return ret;
1627
1628	if (icm->safe_mode) {
1629		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
1630		tb_info(tb, "You need to update the NVM firmware of the controller before it can be used.\n");
1631		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
1632		return 0;
1633	}
1634
1635	ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl);
1636	if (ret)
1637		return ret;
1638
1639	/*
1640	 * Make sure the number of supported preboot ACL entries matches
1641	 * what we expect or disable the whole feature.
1642	 */
1643	if (tb->nboot_acl > icm->max_boot_acl)
1644		tb->nboot_acl = 0;
1645
1646	return 0;
1647}
1648
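/*
 * Ask the firmware to save the connected devices before suspend. A
 * mailbox error is only logged and otherwise ignored.
 */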
1649static int icm_suspend(struct tb *tb)
1650{
1651	int ret;
1652
1653	ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
1654	if (ret)
1655		tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
1656			ret, __func__);
1657
1658	return 0;
1659}
1660
1661/*
1662 * Mark all switches (except root switch) below this one unplugged. ICM
1663 * firmware will send us an updated list of switches after we have sent
1664 * it the driver ready command. If a switch is not in that list it will be
1665 * removed when we perform rescan.
1666 */
1667static void icm_unplug_children(struct tb_switch *sw)
1668{
1669	unsigned int i;
1670
1671	if (tb_route(sw))
1672		sw->is_unplugged = true;
1673
1674	for (i = 1; i <= sw->config.max_port_number; i++) {
1675		struct tb_port *port = &sw->ports[i];
1676
1677		if (tb_is_upstream_port(port))
1678			continue;
1679		if (port->xdomain) {
1680			port->xdomain->is_unplugged = true;
1681			continue;
1682		}
1683		if (!port->remote)
1684			continue;
1685
1686		icm_unplug_children(port->remote->sw);
1687	}
1688}
1689
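/*
 * Walk the switch tree and remove every XDomain connection and switch
 * that is still marked unplugged, i.e. was not re-announced by the
 * firmware after resume.
 */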
1690static void icm_free_unplugged_children(struct tb_switch *sw)
1691{
1692	unsigned int i;
1693
1694	for (i = 1; i <= sw->config.max_port_number; i++) {
1695		struct tb_port *port = &sw->ports[i];
1696
1697		if (tb_is_upstream_port(port))
1698			continue;
1699
1700		if (port->xdomain && port->xdomain->is_unplugged) {
1701			tb_xdomain_remove(port->xdomain);
1702			port->xdomain = NULL;
1703			continue;
1704		}
1705
1706		if (!port->remote)
1707			continue;
1708
1709		if (port->remote->sw->is_unplugged) {
1710			tb_switch_remove(port->remote->sw);
1711			port->remote = NULL;
1712		} else {
1713			icm_free_unplugged_children(port->remote->sw);
1714		}
1715	}
1716}
1717
1718static void icm_rescan_work(struct work_struct *work)
1719{
1720	struct icm *icm = container_of(work, struct icm, rescan_work.work);
1721	struct tb *tb = icm_to_tb(icm);
1722
1723	mutex_lock(&tb->lock);
1724	if (tb->root_switch)
1725		icm_free_unplugged_children(tb->root_switch);
1726	mutex_unlock(&tb->lock);
1727}
1728
1729static void icm_complete(struct tb *tb)
1730{
1731	struct icm *icm = tb_priv(tb);
1732
1733	if (tb->nhi->going_away)
1734		return;
1735
1736	icm_unplug_children(tb->root_switch);
1737
1738	/*
1739	 * Now all existing children should be resumed; start events
1740	 * from ICM to get updated status.
1741	 */
1742	__icm_driver_ready(tb, NULL, NULL);
1743
1744	/*
1745	 * We do not get notifications of devices that have been
1746	 * unplugged during suspend, so schedule a rescan to clean up
1747	 * any that are gone.
1748	 */
1749	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
1750}
1751
1752static int icm_start(struct tb *tb)
1753{
1754	struct icm *icm = tb_priv(tb);
1755	int ret;
1756
1757	if (icm->safe_mode)
1758		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
1759	else
1760		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1761	if (!tb->root_switch)
1762		return -ENODEV;
1763
1764	/*
1765	 * NVM upgrade has not been tested on Apple systems and they
1766	 * don't provide images publicly either. To be on the safe side
1767	 * prevent root switch NVM upgrade on Macs for now.
1768	 */
1769	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
1770
1771	ret = tb_switch_add(tb->root_switch);
1772	if (ret) {
1773		tb_switch_put(tb->root_switch);
1774		tb->root_switch = NULL;
1775	}
1776
1777	return ret;
1778}
1779
1780static void icm_stop(struct tb *tb)
1781{
1782	struct icm *icm = tb_priv(tb);
1783
1784	cancel_delayed_work(&icm->rescan_work);
1785	tb_switch_remove(tb->root_switch);
1786	tb->root_switch = NULL;
1787	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
1788}
1789
1790static int icm_disconnect_pcie_paths(struct tb *tb)
1791{
1792	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
1793}
1794
1795/* Falcon Ridge */
1796static const struct tb_cm_ops icm_fr_ops = {
1797	.driver_ready = icm_driver_ready,
1798	.start = icm_start,
1799	.stop = icm_stop,
1800	.suspend = icm_suspend,
1801	.complete = icm_complete,
1802	.handle_event = icm_handle_event,
1803	.approve_switch = icm_fr_approve_switch,
1804	.add_switch_key = icm_fr_add_switch_key,
1805	.challenge_switch_key = icm_fr_challenge_switch_key,
1806	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
1807	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
1808	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
1809};
1810
1811/* Alpine Ridge */
1812static const struct tb_cm_ops icm_ar_ops = {
1813	.driver_ready = icm_driver_ready,
1814	.start = icm_start,
1815	.stop = icm_stop,
1816	.suspend = icm_suspend,
1817	.complete = icm_complete,
1818	.handle_event = icm_handle_event,
1819	.get_boot_acl = icm_ar_get_boot_acl,
1820	.set_boot_acl = icm_ar_set_boot_acl,
1821	.approve_switch = icm_fr_approve_switch,
1822	.add_switch_key = icm_fr_add_switch_key,
1823	.challenge_switch_key = icm_fr_challenge_switch_key,
1824	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
1825	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
1826	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
1827};
1828
1829/* Titan Ridge */
1830static const struct tb_cm_ops icm_tr_ops = {
1831	.driver_ready = icm_driver_ready,
1832	.start = icm_start,
1833	.stop = icm_stop,
1834	.suspend = icm_suspend,
1835	.complete = icm_complete,
1836	.handle_event = icm_handle_event,
1837	.get_boot_acl = icm_ar_get_boot_acl,
1838	.set_boot_acl = icm_ar_set_boot_acl,
1839	.approve_switch = icm_tr_approve_switch,
1840	.add_switch_key = icm_tr_add_switch_key,
1841	.challenge_switch_key = icm_tr_challenge_switch_key,
1842	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
1843	.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
1844	.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
1845};
1846
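/*
 * Allocate the ICM domain and wire up the generation specific
 * callbacks and tb_cm_ops based on the NHI PCI device ID (Falcon
 * Ridge, Alpine Ridge or Titan Ridge). Returns NULL if ICM is not
 * supported on this controller.
 */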
1847struct tb *icm_probe(struct tb_nhi *nhi)
1848{
1849	struct icm *icm;
1850	struct tb *tb;
1851
1852	tb = tb_domain_alloc(nhi, sizeof(struct icm));
1853	if (!tb)
1854		return NULL;
1855
1856	icm = tb_priv(tb);
1857	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
1858	mutex_init(&icm->request_lock);
1859
1860	switch (nhi->pdev->device) {
1861	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
1862	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
1863		icm->is_supported = icm_fr_is_supported;
1864		icm->get_route = icm_fr_get_route;
1865		icm->driver_ready = icm_fr_driver_ready;
1866		icm->device_connected = icm_fr_device_connected;
1867		icm->device_disconnected = icm_fr_device_disconnected;
1868		icm->xdomain_connected = icm_fr_xdomain_connected;
1869		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
1870		tb->cm_ops = &icm_fr_ops;
1871		break;
1872
1873	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
1874	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
1875	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
1876	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
1877	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
1878		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
1879		icm->is_supported = icm_ar_is_supported;
1880		icm->get_mode = icm_ar_get_mode;
1881		icm->get_route = icm_ar_get_route;
1882		icm->driver_ready = icm_ar_driver_ready;
1883		icm->device_connected = icm_fr_device_connected;
1884		icm->device_disconnected = icm_fr_device_disconnected;
1885		icm->xdomain_connected = icm_fr_xdomain_connected;
1886		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
1887		tb->cm_ops = &icm_ar_ops;
1888		break;
1889
1890	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
1891	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
1892		icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
1893		icm->is_supported = icm_ar_is_supported;
1894		icm->get_mode = icm_ar_get_mode;
1895		icm->driver_ready = icm_tr_driver_ready;
1896		icm->device_connected = icm_tr_device_connected;
1897		icm->device_disconnected = icm_tr_device_disconnected;
1898		icm->xdomain_connected = icm_tr_xdomain_connected;
1899		icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
1900		tb->cm_ops = &icm_tr_ops;
1901		break;
1902	}
1903
1904	if (!icm->is_supported || !icm->is_supported(tb)) {
1905		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
1906		tb_domain_put(tb);
1907		return NULL;
1908	}
1909
1910	return tb;
1911}