// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_SHORT_TIMEOUT			100	/* ms */
#define XDOMAIN_DEFAULT_TIMEOUT			1000	/* ms */
#define XDOMAIN_BONDING_TIMEOUT			10000	/* ms */
#define XDOMAIN_RETRIES				10
#define XDOMAIN_DEFAULT_MAX_HOPID		15

enum {
	XDOMAIN_STATE_INIT,
	XDOMAIN_STATE_UUID,
	XDOMAIN_STATE_LINK_STATUS,
	XDOMAIN_STATE_LINK_STATE_CHANGE,
	XDOMAIN_STATE_LINK_STATUS2,
	XDOMAIN_STATE_BONDING_UUID_LOW,
	XDOMAIN_STATE_BONDING_UUID_HIGH,
	XDOMAIN_STATE_PROPERTIES,
	XDOMAIN_STATE_ENUMERATED,
	XDOMAIN_STATE_ERROR,
};

static const char * const state_names[] = {
	[XDOMAIN_STATE_INIT] = "INIT",
	[XDOMAIN_STATE_UUID] = "UUID",
	[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
	[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
	[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
	[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
	[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
	[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
	[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
	[XDOMAIN_STATE_ERROR] = "ERROR",
};

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/*
 * Serializes access to the properties and protocol handlers below. If
 * you need to take both this lock and the struct tb_xdomain lock, take
 * this one first.
 */
static DEFINE_MUTEX(xdomain_lock);
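
/*
 * Illustrative sketch of the lock ordering documented above; it is not
 * built as part of this driver. Any path that needs both locks must
 * take xdomain_lock before the per-XDomain lock, exactly as
 * update_property_block() below does.
 */
#if 0
static void example_lock_both(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);	/* global lock first */
	mutex_lock(&xd->lock);		/* then the XDomain lock */

	/* ... touch properties and protocol handlers here ... */

	mutex_unlock(&xd->lock);	/* release in reverse order */
	mutex_unlock(&xdomain_lock);
}
#endif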

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message to
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
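
/*
 * Illustrative sketch (not built as part of this driver): a service
 * driver acknowledging a protocol message it received from the remote
 * domain. The reply buffer contents are protocol specific and
 * hypothetical here; only the tb_xdomain_response() call is real.
 */
#if 0
static int example_send_ack(struct tb_xdomain *xd, const void *reply,
			    size_t size)
{
	/* Fire and forget; no response to this message is expected */
	return tb_xdomain_response(xd, reply, size,
				   TB_CFG_PKG_XDOMAIN_RESP);
}
#endif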

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request to
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
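
/*
 * Illustrative sketch (not built): one synchronous request/response
 * round trip over the XDomain control channel. The request and
 * response layouts are hypothetical, protocol specific words; the
 * call itself and the PDF types are taken from this file.
 */
#if 0
static int example_roundtrip(struct tb_xdomain *xd)
{
	u32 req[2] = { 0 };	/* protocol specific request words */
	u32 res[4];		/* buffer for the expected response */

	/* Blocks until a matching response arrives or 100 ms elapse */
	return tb_xdomain_request(xd, req, sizeof(req),
				  TB_CFG_PKG_XDOMAIN_REQ, res, sizeof(res),
				  TB_CFG_PKG_XDOMAIN_RESP, 100);
}
#endif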

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
	if (res->hdr.type != ERROR_RESPONSE)
		return 0;

	switch (res->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid, u64 *remote_route)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;

	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->err);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though, for which we might add
	 * support later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.err);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 *slw, u8 *tlw,
					    u8 *sls, u8 *tls)
{
	struct tb_xdp_link_state_status_response res;
	struct tb_xdp_link_state_status req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	if (res.status != 0)
		return -EREMOTEIO;

	*slw = res.slw;
	*tlw = res.tlw;
	*sls = res.sls;
	*tls = res.tls;

	return 0;
}

static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
					     struct tb_xdomain *xd, u8 sequence)
{
	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
	struct tb_xdp_link_state_status_response res;
	struct tb_port *port = tb_port_at(xd->route, sw);
	u32 val[2];
	int ret;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
			   LINK_STATE_STATUS_RESPONSE, sizeof(res));

	ret = tb_port_read(port, val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
	if (ret)
		return ret;

	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
					    u8 sequence, u8 tlw, u8 tls)
{
	struct tb_xdp_link_state_change_response res;
	struct tb_xdp_link_state_change req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
			   sizeof(req));
	req.tlw = tlw;
	req.tls = tls;

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.err);
	if (ret)
		return ret;

	return res.status != 0 ? -EREMOTEIO : 0;
}

static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
					     u8 sequence, u32 status)
{
	struct tb_xdp_link_state_change_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
			   sizeof(res));

	res.status = status;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a package with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
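
/*
 * Illustrative sketch (not built): registering a handler for a custom
 * protocol. The UUID and the callback body are hypothetical; the
 * tb_protocol_handler fields (uuid, callback) are the ones checked by
 * tb_register_protocol_handler() above. Registration is refused for
 * the XDomain discovery protocol UUID itself.
 */
#if 0
static int example_cb(const void *buf, size_t size, void *data)
{
	/* Called for each received XDomain packet carrying example_uuid */
	return 0;
}

static const uuid_t example_uuid =
	UUID_INIT(0x12345678, 0x1234, 0x1234,
		  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78);

static struct tb_protocol_handler example_handler = {
	.uuid = &example_uuid,
	.callback = example_cb,
};

static int example_register(void)
{
	return tb_register_protocol_handler(&example_handler);
}

static void example_unregister(void)
{
	tb_unregister_protocol_handler(&example_handler);
}
#endif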

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* Assign new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
		       route);

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev))
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	case LINK_STATE_STATUS_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state status request\n",
		       route);

		if (xd) {
			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
								sequence);
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	case LINK_STATE_CHANGE_REQUEST:
		tb_dbg(tb, "%llx: received XDomain link state change request\n",
		       route);

		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
			const struct tb_xdp_link_state_change *lsc =
				(const struct tb_xdp_link_state_change *)pkg;

			ret = tb_xdp_link_state_change_response(ctl, route,
								sequence, 0);
			xd->target_link_width = lsc->tlw;
			queue_delayed_work(tb->wq, &xd->state_work,
					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
		} else {
			tb_xdp_error_response(ctl, route, sequence,
					      ERROR_NOT_READY);
		}
		break;

	default:
		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv with the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
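
/*
 * Illustrative sketch (not built): a minimal XDomain service driver
 * registration. The probe/remove bodies, the driver name and the
 * "example" protocol key are hypothetical; the tb_service_driver and
 * tb_service_id layouts are assumed to follow the Thunderbolt service
 * headers.
 */
#if 0
static int example_probe(struct tb_service *svc,
			 const struct tb_service_id *id)
{
	/* Set up rings and upper-layer devices for this service */
	return 0;
}

static void example_remove(struct tb_service *svc)
{
	/* Tear down whatever example_probe() set up */
}

static const struct tb_service_id example_ids[] = {
	{
		.match_flags = TBSVC_MATCH_PROTOCOL_KEY,
		.protocol_key = "example",
	},
	{ },
};

static struct tb_service_driver example_driver = {
	.driver.name = "example",
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_ids,
};

static int __init example_init(void)
{
	return tb_register_service_driver(&example_driver);
}

static void __exit example_exit(void)
{
	tb_unregister_service_driver(&example_driver);
}
#endif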

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);
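
/*
 * Illustrative sketch (not built): a service driver can recover its
 * tb_service from a bare struct device with tb_to_service() and reach
 * the parent XDomain with tb_service_parent(), just like
 * tb_service_release() above does.
 */
#if 0
static struct tb_xdomain *example_dev_to_xdomain(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);

	return svc ? tb_service_parent(svc) : NULL;
}
#endif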

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->remote_properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * USB4 inter-domain spec suggests using 15 as HopID if the
	 * other end does not announce it in a property. This is for
	 * TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	uuid_t uuid;
	u64 route;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
				  &route);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			return -EAGAIN;
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return ret;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	if (uuid_equal(&uuid, xd->local_uuid)) {
		if (route == xd->route)
			dev_dbg(&xd->dev, "loop back detected\n");
		else
			dev_dbg(&xd->dev, "intra-domain loop detected\n");

		/* Don't bond lanes automatically for loops */
		xd->bonding_possible = false;
	}

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return -ENODEV;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return -ENOMEM;
	}

	return 0;
}

static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
	struct tb *tb = xd->tb;
	u8 slw, tlw, sls, tls;
	int ret;

	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
		xd->remote_uuid);

	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
					       xd->state_retries, &slw, &tlw, &sls,
					       &tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote link status, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "failed to receive remote link status\n");
		return ret;
	}

	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);

	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
					unsigned int width)
{
	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
	struct tb_port *port = tb_port_at(xd->route, sw);
	struct tb *tb = xd->tb;
	u8 tlw, tls;
	u32 val;
	int ret;

	if (width == 2)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
	else if (width == 1)
		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
	else
		return -EINVAL;

	/* Use the current target speed */
	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;
	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;

	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
		tlw, tls);

	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
					       xd->state_retries, tlw, tls);
	if (ret) {
		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to change remote link state, retrying\n");
			return -EAGAIN;
		}
		dev_err(&xd->dev, "failed to request link state change, aborting\n");
		return ret;
	}

	dev_dbg(&xd->dev, "received link state change response\n");
	return 0;
}

static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
	struct tb_port *port;
	int ret, width;

	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
		width = 1;
	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
		width = 2;
	} else {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"link state change request not received yet, retrying\n");
			return -EAGAIN;
		}
		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
		return -ETIMEDOUT;
	}

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	/*
	 * We can't use tb_xdomain_lane_bonding_enable() here because it
	 * is the other side that initiates lane bonding. So here we
	 * just set the width to both lane adapters and wait for the
	 * link to transition bonded.
	 */
	ret = tb_port_set_link_width(port->dual_link_port, width);
	if (ret) {
		tb_port_warn(port->dual_link_port,
			     "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_set_link_width(port, width);
	if (ret) {
		tb_port_warn(port, "failed to set link width to %d\n", width);
		return ret;
	}

	ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
	if (ret) {
		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
			 width);
		return ret;
	}

	port->bonded = width == 2;
	port->dual_link_port->bonded = width == 2;

	tb_port_update_credits(port);
	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
	return 0;
}

static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->state_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->state_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			return -EAGAIN;
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}

		return ret;
	}

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
		ret = 0;
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		ret = -ENOMEM;
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		/*
		 * Now disable lane 1 if bonding was not enabled. Do
		 * this only if bonding was possible at the beginning
		 * (that is we are the connection manager and there are
		 * two lanes).
		 */
		if (xd->bonding_possible) {
			struct tb_port *port;

			port = tb_port_at(xd->route, tb_xdomain_parent(xd));
			if (!port->bonded)
				tb_port_disable(port->dual_link_port);
		}

		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return -ENODEV;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);

		tb_xdomain_debugfs_init(xd);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return 0;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);

	return ret;
}

static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_UUID;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_LINK_STATUS2;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
{
	if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
		dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
		xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
	} else {
		dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
		xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
	}

	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_PROPERTIES;
	xd->state_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
{
	xd->properties_changed_retries = XDOMAIN_RETRIES;
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void tb_xdomain_state_work(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
	int ret, state = xd->state;

	if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
			 state > XDOMAIN_STATE_ERROR))
		return;

	dev_dbg(&xd->dev, "running state %s\n", state_names[state]);

	switch (state) {
	case XDOMAIN_STATE_INIT:
		if (xd->needs_uuid) {
			tb_xdomain_queue_uuid(xd);
		} else {
			tb_xdomain_queue_properties_changed(xd);
			tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_UUID:
		ret = tb_xdomain_get_uuid(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			xd->state = XDOMAIN_STATE_ERROR;
		} else {
			tb_xdomain_queue_properties_changed(xd);
			if (xd->bonding_possible)
				tb_xdomain_queue_link_status(xd);
			else
				tb_xdomain_queue_properties(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;

			/*
			 * If any of the lane bonding states fail we skip
			 * bonding completely and try to continue from
			 * reading properties.
			 */
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATE_CHANGE:
		ret = tb_xdomain_link_state_change(xd, 2);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_link_status2(xd);
		}
		break;

	case XDOMAIN_STATE_LINK_STATUS2:
		ret = tb_xdomain_get_link_status(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			tb_xdomain_queue_properties(xd);
		} else {
			tb_xdomain_queue_bonding_uuid_low(xd);
		}
		break;

	case XDOMAIN_STATE_BONDING_UUID_LOW:
		tb_xdomain_lane_bonding_enable(xd);
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_BONDING_UUID_HIGH:
		if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
			goto retry_state;
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_PROPERTIES:
		ret = tb_xdomain_get_properties(xd);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry_state;
			xd->state = XDOMAIN_STATE_ERROR;
		} else {
			xd->state = XDOMAIN_STATE_ENUMERATED;
		}
		break;

	case XDOMAIN_STATE_ENUMERATED:
		tb_xdomain_queue_properties(xd);
		break;

	case XDOMAIN_STATE_ERROR:
		break;

	default:
		dev_warn(&xd->dev, "unexpected state %d\n", state);
		break;
	}

	return;

retry_state:
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	dev_dbg(&xd->dev, "sending properties changed notification\n");

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to send properties changed notification, retrying\n");
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
			/* Don't log an error while a retry is still queued */
			return;
		}
		dev_err(&xd->dev, "failed to send properties changed notification\n");
		return;
	}

	xd->properties_changed_retries = XDOMAIN_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
}

static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sysfs_emit(buf, "%u\n", xd->link_width);
}

static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_maxhopid.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	kfree(xd->local_property_block);
	tb_property_free_dir(xd->remote_properties);
	ida_destroy(&xd->out_hopids);
	ida_destroy(&xd->in_hopids);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->state = XDOMAIN_STATE_INIT;
	queue_delayed_work(xd->tb->wq, &xd->state_work,
			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}

static void stop_handshake(struct tb_xdomain *xd)
{
	cancel_delayed_work_sync(&xd->properties_changed_work);
	cancel_delayed_work_sync(&xd->state_work);
	xd->properties_changed_retries = 0;
	xd->state_retries = 0;
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	start_handshake(tb_to_xdomain(dev));
	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to the
 *	    other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	xd->local_max_hopid = down->config.max_in_hop_id;
	ida_init(&xd->service_ids);
	ida_init(&xd->in_hopids);
	ida_init(&xd->out_hopids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;
		xd->bonding_possible = !!down->dual_link_port;
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
	if (remote_uuid)
		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);

	/*
	 * This keeps the DMA powered on as long as we have an active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}
1915
1916/**
1917 * tb_xdomain_add() - Add XDomain to the bus
1918 * @xd: XDomain to add
1919 *
1920 * This function starts the XDomain discovery protocol handshake and
1921 * eventually adds the XDomain to the bus. After calling this function
1922 * the caller needs to call tb_xdomain_remove() in order to remove and
1923 * release the object regardless of whether the handshake succeeded.
1924 */
1925void tb_xdomain_add(struct tb_xdomain *xd)
1926{
1927	/* Start exchanging properties with the other host */
1928	start_handshake(xd);
1929}
1930
1931static int unregister_service(struct device *dev, void *data)
1932{
1933	device_unregister(dev);
1934	return 0;
1935}
1936
1937/**
1938 * tb_xdomain_remove() - Remove XDomain from the bus
1939 * @xd: XDomain to remove
1940 *
1941 * This will stop all ongoing configuration work and remove the XDomain
1942 * along with any services from the bus. When the last reference to @xd
1943 * is released the object will be released as well.
1944 */
1945void tb_xdomain_remove(struct tb_xdomain *xd)
1946{
1947	tb_xdomain_debugfs_remove(xd);
1948
1949	stop_handshake(xd);
1950
1951	device_for_each_child_reverse(&xd->dev, xd, unregister_service);
1952
1953	/*
1954	 * Undo runtime PM here explicitly because it is possible that
1955	 * the XDomain was never added to the bus and thus device_del()
1956	 * is not called for it (device_del() would handle this otherwise).
1957	 */
1958	pm_runtime_disable(&xd->dev);
1959	pm_runtime_put_noidle(&xd->dev);
1960	pm_runtime_set_suspended(&xd->dev);
1961
1962	if (!device_is_registered(&xd->dev)) {
1963		put_device(&xd->dev);
1964	} else {
1965		dev_info(&xd->dev, "host disconnected\n");
1966		device_unregister(&xd->dev);
1967	}
1968}
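
/*
 * Editor's note: a minimal usage sketch (not part of the driver)
 * showing the alloc/add/remove lifecycle documented above. The
 * example_* names are hypothetical; a real caller is the connection
 * manager that discovers the host-to-host link.
 */
static struct tb_xdomain *example_host_connected(struct tb *tb,
						 struct tb_switch *parent_sw,
						 u64 route)
{
	struct tb_xdomain *xd;

	/* Remote UUID is not known yet, so pass NULL and let the
	 * discovery handshake fill it in. */
	xd = tb_xdomain_alloc(tb, &parent_sw->dev, route,
			      tb->root_switch->uuid, NULL);
	if (xd)
		tb_xdomain_add(xd);	/* starts the handshake */
	return xd;
}

static void example_host_disconnected(struct tb_xdomain *xd)
{
	/* Required even if the handshake never completed */
	tb_xdomain_remove(xd);
}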
1969
1970/**
1971 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
1972 * @xd: XDomain connection
1973 *
1974 * Lane bonding is disabled by default for XDomains. This function tries
1975 * to enable bonding by first enabling the port and waiting for the CL0
1976 * state.
1977 *
1978 * Return: %0 in case of success and negative errno in case of error.
1979 */
1980int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
1981{
1982	struct tb_port *port;
1983	int ret;
1984
1985	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
1986	if (!port->dual_link_port)
1987		return -ENODEV;
1988
1989	ret = tb_port_enable(port->dual_link_port);
1990	if (ret)
1991		return ret;
1992
1993	ret = tb_wait_for_port(port->dual_link_port, true);
1994	if (ret < 0)
1995		return ret;
1996	if (!ret)
1997		return -ENOTCONN;
1998
1999	ret = tb_port_lane_bonding_enable(port);
2000	if (ret) {
2001		tb_port_warn(port, "failed to enable lane bonding\n");
2002		return ret;
2003	}
2004
2005	ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
2006	if (ret) {
2007		tb_port_warn(port, "failed to enable lane bonding\n");
2008		return ret;
2009	}
2010
2011	tb_port_update_credits(port);
2012	tb_xdomain_update_link_attributes(xd);
2013
2014	dev_dbg(&xd->dev, "lane bonding enabled\n");
2015	return 0;
2016}
2017EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
2018
2019/**
2020 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
2021 * @xd: XDomain connection
2022 *
2023 * Lane bonding is disabled by default for XDomains. If bonding has been
2024 * enabled, this function can be used to disable it.
2025 */
2026void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
2027{
2028	struct tb_port *port;
2029
2030	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
2031	if (port->dual_link_port) {
2032		tb_port_lane_bonding_disable(port);
2033		if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
2034			tb_port_warn(port, "timeout disabling lane bonding\n");
2035		tb_port_disable(port->dual_link_port);
2036		tb_port_update_credits(port);
2037		tb_xdomain_update_link_attributes(xd);
2038
2039		dev_dbg(&xd->dev, "lane bonding disabled\n");
2040	}
2041}
2042EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
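
/*
 * Editor's note: an illustrative sketch (not part of the driver) of a
 * service driver bonding lanes around a bandwidth-heavy phase and
 * falling back to a single lane if bonding fails. example_start_io()
 * is hypothetical.
 */
static bool example_start_io(struct tb_xdomain *xd)
{
	int ret;

	ret = tb_xdomain_lane_bonding_enable(xd);
	if (ret)
		/* A single lane still works, just at lower bandwidth */
		dev_warn(&xd->dev, "lane bonding not available: %d\n", ret);

	/* ... high-speed DMA traffic runs here ... */

	return !ret;	/* if true, caller disables bonding when done */
}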
2043
2044/**
2045 * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
2046 * @xd: XDomain connection
2047 * @hopid: Preferred HopID or %-1 for next available
2048 *
2049 * Returns the allocated HopID or negative errno. Specifically returns
2050 * %-ENOSPC if there are no more available HopIDs. The returned HopID
2051 * is guaranteed to be within the range supported by the input lane
2052 * adapter. Call tb_xdomain_release_in_hopid() to release it.
2053 */
2054int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
2055{
2056	if (hopid < 0)
2057		hopid = TB_PATH_MIN_HOPID;
2058	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
2059		return -EINVAL;
2060
2061	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
2062			       GFP_KERNEL);
2063}
2064EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
2065
2066/**
2067 * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
2068 * @xd: XDomain connection
2069 * @hopid: Preferred HopID or %-1 for next available
2070 *
2071 * Returns the allocated HopID or negative errno. Specifically returns
2072 * %-ENOSPC if there are no more available HopIDs. The returned HopID
2073 * is guaranteed to be within the range supported by the output lane
2074 * adapter. Call tb_xdomain_release_out_hopid() to release it.
2075 */
2076int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
2077{
2078	if (hopid < 0)
2079		hopid = TB_PATH_MIN_HOPID;
2080	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
2081		return -EINVAL;
2082
2083	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
2084			       GFP_KERNEL);
2085}
2086EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
2087
2088/**
2089 * tb_xdomain_release_in_hopid() - Release input HopID
2090 * @xd: XDomain connection
2091 * @hopid: HopID to release
2092 */
2093void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
2094{
2095	ida_free(&xd->in_hopids, hopid);
2096}
2097EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
2098
2099/**
2100 * tb_xdomain_release_out_hopid() - Release output HopID
2101 * @xd: XDomain connection
2102 * @hopid: HopID to release
2103 */
2104void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
2105{
2106	ida_free(&xd->out_hopids, hopid);
2107}
2108EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
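
/*
 * Editor's note: a sketch (not part of the driver) that allocates the
 * input/output HopID pair for a tunnel. Passing %-1 asks for the next
 * available ID; the error path releases what was already allocated.
 */
static int example_alloc_hopids(struct tb_xdomain *xd, int *in_hopid,
				int *out_hopid)
{
	int ret;

	ret = tb_xdomain_alloc_in_hopid(xd, -1);
	if (ret < 0)
		return ret;
	*in_hopid = ret;

	ret = tb_xdomain_alloc_out_hopid(xd, -1);
	if (ret < 0) {
		tb_xdomain_release_in_hopid(xd, *in_hopid);
		return ret;
	}
	*out_hopid = ret;

	return 0;
}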
2109
2110/**
2111 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
2112 * @xd: XDomain connection
2113 * @transmit_path: HopID we are using to send out packets
2114 * @transmit_ring: DMA ring used to send out packets
2115 * @receive_path: HopID the other end is using to send packets to us
2116 * @receive_ring: DMA ring used to receive packets from @receive_path
2117 *
2118 * The function enables the DMA paths so that after a successful return
2119 * the caller can send and receive packets using the high-speed DMA
2120 * paths. If a transmit or receive path is not needed, pass %-1 for
2121 * those parameters.
2122 *
2123 * Return: %0 in case of success and negative errno in case of error
2124 */
2125int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
2126			    int transmit_ring, int receive_path,
2127			    int receive_ring)
2128{
2129	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
2130					       transmit_ring, receive_path,
2131					       receive_ring);
2132}
2133EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
2134
2135/**
2136 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
2137 * @xd: XDomain connection
2138 * @transmit_path: HopID we are using to send out packets
2139 * @transmit_ring: DMA ring used to send out packets
2140 * @receive_path: HopID the other end is using to send packets to us
2141 * @receive_ring: DMA ring used to receive packets from @receive_path
2142 *
2143 * This does the opposite of tb_xdomain_enable_paths(). After a call
2144 * to this function the caller is not expected to use the rings anymore.
2145 * Passing %-1 as a path/ring parameter means "don't care". Normally
2146 * callers should pass the same values they used when enabling the paths.
2147 *
2148 * Return: %0 in case of success and negative errno in case of error
2149 */
2150int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
2151			     int transmit_ring, int receive_path,
2152			     int receive_ring)
2153{
2154	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
2155						  transmit_ring, receive_path,
2156						  receive_ring);
2157}
2158EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
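
/*
 * Editor's note: a sketch (not part of the driver) showing the
 * recommended pairing of tb_xdomain_enable_paths() and
 * tb_xdomain_disable_paths() with identical arguments. The
 * example_ring struct stands in for a driver's private state and is
 * hypothetical.
 */
struct example_ring {
	struct tb_xdomain *xd;
	int tx_path, tx_ring;
	int rx_path, rx_ring;
};

static int example_ring_start(struct example_ring *r)
{
	return tb_xdomain_enable_paths(r->xd, r->tx_path, r->tx_ring,
				       r->rx_path, r->rx_ring);
}

static void example_ring_stop(struct example_ring *r)
{
	/* Same values as in example_ring_start() above */
	tb_xdomain_disable_paths(r->xd, r->tx_path, r->tx_ring,
				 r->rx_path, r->rx_ring);
}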
2159
2160struct tb_xdomain_lookup {
2161	const uuid_t *uuid;
2162	u8 link;
2163	u8 depth;
2164	u64 route;
2165};
2166
2167static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
2168	const struct tb_xdomain_lookup *lookup)
2169{
2170	struct tb_port *port;
2171
2172	tb_switch_for_each_port(sw, port) {
2173		struct tb_xdomain *xd;
2174
2175		if (port->xdomain) {
2176			xd = port->xdomain;
2177
2178			if (lookup->uuid) {
2179				if (xd->remote_uuid &&
2180				    uuid_equal(xd->remote_uuid, lookup->uuid))
2181					return xd;
2182			} else if (lookup->link &&
2183				   lookup->link == xd->link &&
2184				   lookup->depth == xd->depth) {
2185				return xd;
2186			} else if (lookup->route &&
2187				   lookup->route == xd->route) {
2188				return xd;
2189			}
2190		} else if (tb_port_has_remote(port)) {
2191			xd = switch_find_xdomain(port->remote->sw, lookup);
2192			if (xd)
2193				return xd;
2194		}
2195	}
2196
2197	return NULL;
2198}
2199
2200/**
2201 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
2202 * @tb: Domain where the XDomain belongs to
2203 * @uuid: UUID to look for
2204 *
2205 * Finds XDomain by walking through the Thunderbolt topology below @tb.
2206 * The returned XDomain will have its reference count increased so the
2207 * caller needs to call tb_xdomain_put() when it is done with the
2208 * object.
2209 *
2210 * This will find all XDomains including the ones that are not yet added
2211 * to the bus (handshake is still in progress).
2212 *
2213 * The caller needs to hold @tb->lock.
2214 */
2215struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
2216{
2217	struct tb_xdomain_lookup lookup;
2218	struct tb_xdomain *xd;
2219
2220	memset(&lookup, 0, sizeof(lookup));
2221	lookup.uuid = uuid;
2222
2223	xd = switch_find_xdomain(tb->root_switch, &lookup);
2224	return tb_xdomain_get(xd);
2225}
2226EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
2227
2228/**
2229 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
2230 * @tb: Domain where the XDomain belongs to
2231 * @link: Root switch link number
2232 * @depth: Depth in the link
2233 *
2234 * Finds XDomain by walking through the Thunderbolt topology below @tb.
2235 * The returned XDomain will have its reference count increased so the
2236 * caller needs to call tb_xdomain_put() when it is done with the
2237 * object.
2238 *
2239 * This will find all XDomains including the ones that are not yet added
2240 * to the bus (handshake is still in progress).
2241 *
2242 * The caller needs to hold @tb->lock.
2243 */
2244struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
2245						 u8 depth)
2246{
2247	struct tb_xdomain_lookup lookup;
2248	struct tb_xdomain *xd;
2249
2250	memset(&lookup, 0, sizeof(lookup));
2251	lookup.link = link;
2252	lookup.depth = depth;
2253
2254	xd = switch_find_xdomain(tb->root_switch, &lookup);
2255	return tb_xdomain_get(xd);
2256}
2257
2258/**
2259 * tb_xdomain_find_by_route() - Find an XDomain by route string
2260 * @tb: Domain where the XDomain belongs to
2261 * @route: XDomain route string
2262 *
2263 * Finds XDomain by walking through the Thunderbolt topology below @tb.
2264 * The returned XDomain will have its reference count increased so the
2265 * caller needs to call tb_xdomain_put() when it is done with the
2266 * object.
2267 *
2268 * This will find all XDomains including the ones that are not yet added
2269 * to the bus (handshake is still in progress).
2270 *
2271 * The caller needs to hold @tb->lock.
2272 */
2273struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
2274{
2275	struct tb_xdomain_lookup lookup;
2276	struct tb_xdomain *xd;
2277
2278	memset(&lookup, 0, sizeof(lookup));
2279	lookup.route = route;
2280
2281	xd = switch_find_xdomain(tb->root_switch, &lookup);
2282	return tb_xdomain_get(xd);
2283}
2284EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
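
/*
 * Editor's note: a lookup sketch (not part of the driver). All three
 * finders take a reference on the returned XDomain, so the caller must
 * balance it with tb_xdomain_put(); the put is NULL-safe.
 */
static bool example_route_in_use(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	tb_xdomain_put(xd);
	return xd != NULL;
}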
2285
2286bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
2287			       const void *buf, size_t size)
2288{
2289	const struct tb_protocol_handler *handler, *tmp;
2290	const struct tb_xdp_header *hdr = buf;
2291	unsigned int length;
2292	int ret = 0;
2293
2294	/* We expect the packet to be at least the size of the header */
2295	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
2296	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
2297		return true;
2298	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
2299		return true;
2300
2301	/*
2302	 * Handle XDomain discovery protocol packets directly here. For
2303	 * other protocols (based on their UUID) we call registered
2304	 * handlers in turn.
2305	 */
2306	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
2307		if (type == TB_CFG_PKG_XDOMAIN_REQ)
2308			return tb_xdp_schedule_request(tb, hdr, size);
2309		return false;
2310	}
2311
2312	mutex_lock(&xdomain_lock);
2313	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
2314		if (!uuid_equal(&hdr->uuid, handler->uuid))
2315			continue;
2316
2317		mutex_unlock(&xdomain_lock);
2318		ret = handler->callback(buf, size, handler->data);
2319		mutex_lock(&xdomain_lock);
2320
2321		if (ret)
2322			break;
2323	}
2324	mutex_unlock(&xdomain_lock);
2325
2326	return ret > 0;
2327}
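
/*
 * Editor's note: a sketch (not part of the driver) of an additional
 * protocol handler of the kind dispatched above. The UUID and callback
 * are hypothetical; a positive return value from the callback tells
 * tb_xdomain_handle_request() that the packet was consumed.
 */
static int example_proto_cb(const void *buf, size_t size, void *data)
{
	/* Validate and handle the packet here */
	return 1;	/* > 0: packet consumed, stop iterating handlers */
}

static const uuid_t example_proto_uuid =
	UUID_INIT(0x0b346a06, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01);

static struct tb_protocol_handler example_proto_handler = {
	.uuid = &example_proto_uuid,
	.callback = example_proto_cb,
};

/* Registered with tb_register_protocol_handler(&example_proto_handler)
 * and removed with tb_unregister_protocol_handler(). */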
2328
2329static int update_xdomain(struct device *dev, void *data)
2330{
2331	struct tb_xdomain *xd;
2332
2333	xd = tb_to_xdomain(dev);
2334	if (xd) {
2335		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
2336				   msecs_to_jiffies(50));
2337	}
2338
2339	return 0;
2340}
2341
2342static void update_all_xdomains(void)
2343{
2344	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
2345}
2346
2347static bool remove_directory(const char *key, const struct tb_property_dir *dir)
2348{
2349	struct tb_property *p;
2350
2351	p = tb_property_find(xdomain_property_dir, key,
2352			     TB_PROPERTY_TYPE_DIRECTORY);
2353	if (p && p->value.dir == dir) {
2354		tb_property_remove(p);
2355		return true;
2356	}
2357	return false;
2358}
2359
2360/**
2361 * tb_register_property_dir() - Register property directory to the host
2362 * @key: Key (name) of the directory to add
2363 * @dir: Directory to add
2364 *
2365 * Service drivers can use this function to add a new property
2366 * directory to the properties this host exposes. The other connected
2367 * hosts are notified so they can re-read the properties of this host
2368 * if they are interested.
2369 *
2370 * Return: %0 on success and negative errno on failure
2371 */
2372int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
2373{
2374	int ret;
2375
2376	if (WARN_ON(!xdomain_property_dir))
2377		return -EAGAIN;
2378
2379	if (!key || strlen(key) > 8)
2380		return -EINVAL;
2381
2382	mutex_lock(&xdomain_lock);
2383	if (tb_property_find(xdomain_property_dir, key,
2384			     TB_PROPERTY_TYPE_DIRECTORY)) {
2385		ret = -EEXIST;
2386		goto err_unlock;
2387	}
2388
2389	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
2390	if (ret)
2391		goto err_unlock;
2392
2393	xdomain_property_block_gen++;
2394
2395	mutex_unlock(&xdomain_lock);
2396	update_all_xdomains();
2397	return 0;
2398
2399err_unlock:
2400	mutex_unlock(&xdomain_lock);
2401	return ret;
2402}
2403EXPORT_SYMBOL_GPL(tb_register_property_dir);
2404
2405/**
2406 * tb_unregister_property_dir() - Removes property directory from host
2407 * @key: Key (name) of the directory
2408 * @dir: Directory to remove
2409 *
2410 * This will remove the existing directory from this host and notify the
2411 * connected hosts about the change.
2412 */
2413void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
2414{
2415	mutex_lock(&xdomain_lock);
2416	if (remove_directory(key, dir))
2417		xdomain_property_block_gen++;
2418	mutex_unlock(&xdomain_lock);
2419
2420	/*
2421	 * Notify the connected hosts so they can re-read our properties.
2422	 */
2423	update_all_xdomains();
2424}
2425EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
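
/*
 * Editor's note: a sketch (not part of the driver) of a service driver
 * publishing its own directory. The "exsvc" key, the UUID and the
 * property values are hypothetical; keys are limited to 8 characters
 * as enforced in tb_register_property_dir() above.
 */
static const uuid_t example_dir_uuid =
	UUID_INIT(0xc27e1c3b, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02);

static struct tb_property_dir *example_dir;

static int example_publish(void)
{
	int ret;

	example_dir = tb_property_create_dir(&example_dir_uuid);
	if (!example_dir)
		return -ENOMEM;

	tb_property_add_immediate(example_dir, "prtcvers", 1);

	ret = tb_register_property_dir("exsvc", example_dir);
	if (ret)
		tb_property_free_dir(example_dir);
	return ret;
}

static void example_unpublish(void)
{
	tb_unregister_property_dir("exsvc", example_dir);
	tb_property_free_dir(example_dir);
}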
2426
2427int tb_xdomain_init(void)
2428{
2429	xdomain_property_dir = tb_property_create_dir(NULL);
2430	if (!xdomain_property_dir)
2431		return -ENOMEM;
2432
2433	/*
2434	 * Initialize the standard set of properties without any service
2435	 * directories. Those will be added by service drivers themselves
2436	 * when they are loaded.
2437	 *
2438	 * The rest of the properties are filled in dynamically based on
2439	 * these when the P2P connection is made.
2440	 */
2441	tb_property_add_immediate(xdomain_property_dir, "vendorid",
2442				  PCI_VENDOR_ID_INTEL);
2443	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
2444	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
2445	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
2446
2447	xdomain_property_block_gen = get_random_u32();
2448	return 0;
2449}
2450
2451void tb_xdomain_exit(void)
2452{
2453	tb_property_free_dir(xdomain_property_dir);
2454}