Linux Audio

Check our new training course

Loading...
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * USB4 specific functionality
   4 *
   5 * Copyright (C) 2019, Intel Corporation
   6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
   7 *	    Rajmohan Mani <rajmohan.mani@intel.com>
   8 */
   9
  10#include <linux/delay.h>
  11#include <linux/ktime.h>
  12
  13#include "sb_regs.h"
  14#include "tb.h"
  15
/* Router operation data area holds at most this many double words */
#define USB4_DATA_DWORDS		16
/* How many times a failed data block transfer is retried */
#define USB4_DATA_RETRIES		3

/* Router operation opcodes written to ROUTER_CS_26 */
enum usb4_switch_op {
	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
	USB4_SWITCH_OP_NVM_WRITE = 0x20,
	USB4_SWITCH_OP_NVM_AUTH = 0x21,
	USB4_SWITCH_OP_NVM_READ = 0x22,
	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
	USB4_SWITCH_OP_DROM_READ = 0x24,
	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};

/* Target of a port sideband register transaction */
enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

/* Metadata dword layout for the NVM read operation */
#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

/* NVM set offset reuses the read offset field */
#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

/* Metadata dword layout for the DROM read operation */
#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

/* NVM sector size is returned in the low bits of the metadata dword */
#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

/*
 * Callbacks used by the generic block transfer helpers below.
 * read_block_fn: (opaque data, dword address, buffer, dwords) -> 0/errno
 * write_block_fn: (opaque data, buffer, dwords) -> 0/errno
 */
typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
typedef int (*write_block_fn)(void *, const void *, size_t);
  54
  55static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
  56				    u32 value, int timeout_msec)
  57{
  58	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
 
 
 
  59
  60	do {
  61		u32 val;
  62		int ret;
  63
  64		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
 
 
 
 
 
 
 
 
 
  65		if (ret)
  66			return ret;
 
 
 
 
 
 
 
  67
  68		if ((val & bit) == value)
  69			return 0;
  70
  71		usleep_range(50, 100);
  72	} while (ktime_before(ktime_get(), timeout));
  73
  74	return -ETIMEDOUT;
  75}
  76
  77static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
  78				    size_t dwords)
  79{
  80	if (dwords > USB4_DATA_DWORDS)
  81		return -EINVAL;
  82
  83	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
  84}
 
  85
  86static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
  87				     size_t dwords)
  88{
  89	if (dwords > USB4_DATA_DWORDS)
  90		return -EINVAL;
  91
  92	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
  93}
 
  94
/* Router operation metadata lives in ROUTER_CS_25 */
static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
{
	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
 
 
 
 
 
 
 
  99
/* Write the router operation metadata dword (ROUTER_CS_25) */
static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
{
	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}
 104
/*
 * Read an arbitrary byte range in USB4_DATA_DWORDS-sized chunks using a
 * dword-addressed block read callback.
 *
 * @address: Byte address where to start (low two bits become the byte
 *	     offset into the first dword)
 * @buf: Buffer where the data is copied
 * @size: Number of bytes to read
 * @read_block: Callback doing one block read
 * @read_block_data: Opaque pointer passed to @read_block
 *
 * Transient errors (anything except %-ENODEV) are retried up to
 * USB4_DATA_RETRIES times.
 */
static int usb4_do_read_data(u16 address, void *buf, size_t size,
			     read_block_fn read_block, void *read_block_data)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	/* Split into dword-aligned address plus byte offset */
	offset = address & 3;
	address = address & ~3;

	do {
		size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4);
		unsigned int dwaddress, dwords;
		u8 data[USB4_DATA_DWORDS * 4];
		int ret;

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(read_block_data, dwaddress, data, dwords);
		if (ret) {
			if (ret != -ENODEV && retries--)
				continue;
			return ret;
		}

		/*
		 * NOTE(review): with a non-zero @offset this copy reads
		 * up to @offset bytes past what the block read filled in
		 * the first chunk — callers appear to pass dword-aligned
		 * addresses; confirm before relying on unaligned reads.
		 */
		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
 139
/*
 * Write an arbitrary byte range in USB4_DATA_DWORDS-sized chunks using a
 * block write callback.
 *
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Number of bytes to write
 * @write_next_block: Callback writing one block of dwords
 * @write_block_data: Opaque pointer passed to @write_next_block
 *
 * %-ETIMEDOUT from the callback is retried up to USB4_DATA_RETRIES
 * times and converted to %-EIO when the retries run out.
 */
static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
	write_block_fn write_next_block, void *write_block_data)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	offset = address & 3;
	address = address & ~3;

	do {
		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
		u8 data[USB4_DATA_DWORDS * 4];
		int ret;

		/*
		 * NOTE(review): when @offset is non-zero the first
		 * @offset bytes of @data stay uninitialized and a
		 * full-sized @nbytes would overrun the buffer — callers
		 * appear to pass dword-aligned addresses; confirm before
		 * relying on unaligned writes.
		 */
		memcpy(data + offset, buf, nbytes);

		ret = write_next_block(write_block_data, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
 173
/*
 * Execute a USB4 router operation.
 *
 * Writes @opcode with the OV (operation valid) bit to ROUTER_CS_26,
 * waits up to 500 ms for the router to clear OV, and decodes the
 * result. Returns %-EOPNOTSUPP if the router set the ONS (opcode not
 * supported) bit; otherwise the status field is stored in @status
 * (%0 means the operation succeeded).
 */
static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
{
	u32 val;
	int ret;

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* The router clears OV when the operation completes */
	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	*status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
	return 0;
}
 198
 199static bool link_is_usb4(struct tb_port *port)
 200{
 201	u32 val;
 202
 203	if (!port->cap_usb4)
 204		return false;
 205
 206	if (tb_port_read(port, &val, TB_CFG_PORT,
 207			 port->cap_usb4 + PORT_CS_18, 1))
 208		return false;
 209
 210	return !(val & PORT_CS_18_TCM);
 211}
 212
/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	/* Nothing to set up for the host router itself */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Prefer USB 3.x tunneling over the internal xHCI when possible */
	if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/* Only enable PCIe tunneling if the parent router supports it */
	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Wait for the router to acknowledge the configuration (CR) */
	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
					ROUTER_CS_6_CR, 50);
}
 283
/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space (ROUTER_CS_7 and the
 * following dword). Returns %0 in case of success and negative errno
 * otherwise.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}
 295
 296static int usb4_switch_drom_read_block(void *data,
 297				       unsigned int dwaddress, void *buf,
 298				       size_t dwords)
 299{
 300	struct tb_switch *sw = data;
 301	u8 status = 0;
 302	u32 metadata;
 303	int ret;
 304
 305	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
 306	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
 307		USB4_DROM_ADDRESS_MASK;
 308
 309	ret = usb4_switch_op_write_metadata(sw, metadata);
 310	if (ret)
 311		return ret;
 312
 313	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
 314	if (ret)
 315		return ret;
 316
 317	if (status)
 318		return -EIO;
 319
 320	return usb4_switch_op_read_data(sw, buf, dwords);
 321}
 322
/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. The read is done in
 * chunks with retries (see usb4_do_read_data()). For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_drom_read_block, sw);
}
 340
 341static int usb4_set_port_configured(struct tb_port *port, bool configured)
 
 
 
 
 
 
 
 342{
 
 343	int ret;
 344	u32 val;
 345
 346	ret = tb_port_read(port, &val, TB_CFG_PORT,
 347			   port->cap_usb4 + PORT_CS_19, 1);
 348	if (ret)
 349		return ret;
 350
 351	if (configured)
 352		val |= PORT_CS_19_PC;
 353	else
 354		val &= ~PORT_CS_19_PC;
 355
 356	return tb_port_write(port, &val, TB_CFG_PORT,
 357			     port->cap_usb4 + PORT_CS_19, 1);
 358}
 359
/**
 * usb4_switch_configure_link() - Set upstream USB4 link configured
 * @sw: USB4 router
 *
 * Sets the upstream USB4 link to be configured for power management
 * purposes.
 */
int usb4_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up;

	/* Only device routers have an upstream link to configure */
	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	return usb4_set_port_configured(up, true);
}
 
 377
/**
 * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration
 * @sw: USB4 router
 *
 * Reverse of usb4_switch_configure_link().
 */
void usb4_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up;

	/* Skip if the router is already gone or is the host router */
	if (sw->is_unplugged || !tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	usb4_set_port_configured(up, false);
}
 
 
 394
 395/**
 396 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 397 * @sw: USB4 router
 398 *
 399 * Checks whether conditions are met so that lane bonding can be
 400 * established with the upstream router. Call only for device routers.
 401 */
 402bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
 403{
 404	struct tb_port *up;
 405	int ret;
 406	u32 val;
 407
 408	up = tb_upstream_port(sw);
 409	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
 410	if (ret)
 411		return false;
 
 
 
 412
 413	return !!(val & PORT_CS_18_BE);
 
 
 
 
 
 414}
 415
/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets the sleep bit for the router. Returns when the router sleep
 * ready bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Poll SLPR for up to 500 ms */
	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
					ROUTER_CS_6_SLPR, 500);
}
 442
/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
	if (ret)
		return ret;

	/* Status 0x2 is mapped to "NVM not supported", anything else to -EIO */
	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	/* Sector size is reported in the operation metadata */
	ret = usb4_switch_op_read_metadata(sw, &metadata);
	if (ret)
		return ret;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
 470
 471static int usb4_switch_nvm_read_block(void *data,
 472	unsigned int dwaddress, void *buf, size_t dwords)
 473{
 474	struct tb_switch *sw = data;
 475	u8 status = 0;
 476	u32 metadata;
 477	int ret;
 478
 479	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
 480		   USB4_NVM_READ_LENGTH_MASK;
 481	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
 482		   USB4_NVM_READ_OFFSET_MASK;
 483
 484	ret = usb4_switch_op_write_metadata(sw, metadata);
 485	if (ret)
 486		return ret;
 487
 488	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
 489	if (ret)
 490		return ret;
 491
 492	if (status)
 493		return -EIO;
 494
 495	return usb4_switch_op_read_data(sw, buf, dwords);
 496}
 497
/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router in chunks with retries (see
 * usb4_do_read_data()). If NVM is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_nvm_read_block, sw);
}
 514
 515static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
 516				      unsigned int address)
 
 
 
 
 
 
 
 
 
 517{
 518	u32 metadata, dwaddress;
 519	u8 status = 0;
 520	int ret;
 521
 522	dwaddress = address / 4;
 523	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
 524		   USB4_NVM_SET_OFFSET_MASK;
 525
 526	ret = usb4_switch_op_write_metadata(sw, metadata);
 527	if (ret)
 528		return ret;
 529
 530	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
 531	if (ret)
 532		return ret;
 533
 534	return status ? -EIO : 0;
 535}
 536
 537static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
 538					    size_t dwords)
 539{
 540	struct tb_switch *sw = data;
 541	u8 status;
 542	int ret;
 543
 544	ret = usb4_switch_op_write_data(sw, buf, dwords);
 545	if (ret)
 546		return ret;
 547
 548	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
 549	if (ret)
 550		return ret;
 551
 552	return status ? -EIO : 0;
 553}
 554
/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	/* Tell the router where the data goes before streaming it */
	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return usb4_do_write_data(address, buf, size,
				  usb4_switch_nvm_write_next_block, sw);
}
 577
/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. If the authentication
 * is successful the router is power cycled and the new NVM starts
 * running. In case of failure returns negative errno.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	u8 status = 0;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
	if (ret)
		return ret;

	/*
	 * Map the operation status to errno. NOTE(review): the exact
	 * meaning of codes 0x1-0x3 comes from the USB4 router operation
	 * definition — confirm against the spec if extending this.
	 */
	switch (status) {
	case 0x0:
		tb_sw_dbg(sw, "NVM authentication successful\n");
		return 0;
	case 0x1:
		return -EINVAL;
	case 0x2:
		return -EAGAIN;
	case 0x3:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
 610
 611/**
 612 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 613 * @sw: USB4 router
 614 * @in: DP IN adapter
 615 *
 616 * For DP tunneling this function can be used to query availability of
 617 * DP IN resource. Returns true if the resource is available for DP
 618 * tunneling, false otherwise.
 619 */
 620bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
 621{
 
 622	u8 status;
 623	int ret;
 624
 625	ret = usb4_switch_op_write_metadata(sw, in->port);
 626	if (ret)
 627		return false;
 628
 629	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
 630	/*
 631	 * If DP resource allocation is not supported assume it is
 632	 * always available.
 633	 */
 634	if (ret == -EOPNOTSUPP)
 635		return true;
 636	else if (ret)
 637		return false;
 638
 639	return !status;
 640}
 641
 642/**
 643 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 644 * @sw: USB4 router
 645 * @in: DP IN adapter
 646 *
 647 * Allocates DP IN resource for DP tunneling using USB4 router
 648 * operations. If the resource was allocated returns %0. Otherwise
 649 * returns negative errno, in particular %-EBUSY if the resource is
 650 * already allocated.
 651 */
 652int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 653{
 
 654	u8 status;
 655	int ret;
 656
 657	ret = usb4_switch_op_write_metadata(sw, in->port);
 658	if (ret)
 659		return ret;
 660
 661	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
 662	if (ret == -EOPNOTSUPP)
 663		return 0;
 664	else if (ret)
 665		return ret;
 666
 667	return status ? -EBUSY : 0;
 668}
 669
 670/**
 671 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 672 * @sw: USB4 router
 673 * @in: DP IN adapter
 674 *
 675 * Releases the previously allocated DP IN resource.
 676 */
 677int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 678{
 
 679	u8 status;
 680	int ret;
 681
 682	ret = usb4_switch_op_write_metadata(sw, in->port);
 683	if (ret)
 684		return ret;
 685
 686	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
 687	if (ret == -EOPNOTSUPP)
 688		return 0;
 689	else if (ret)
 690		return ret;
 691
 692	return status ? -EIO : 0;
 693}
 694
 695static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
 696{
 697	struct tb_port *p;
 698	int usb4_idx = 0;
 699
 700	/* Assume port is primary */
 701	tb_switch_for_each_port(sw, p) {
 702		if (!tb_port_is_null(p))
 703			continue;
 704		if (tb_is_upstream_port(p))
 705			continue;
 706		if (!p->link_nr) {
 707			if (p == port)
 708				break;
 709			usb4_idx++;
 710		}
 711	}
 712
 713	return usb4_idx;
 714}
 715
 716/**
 717 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
 718 * @sw: USB4 router
 719 * @port: USB4 port
 720 *
 721 * USB4 routers have direct mapping between USB4 ports and PCIe
 722 * downstream adapters where the PCIe topology is extended. This
 723 * function returns the corresponding downstream PCIe adapter or %NULL
 724 * if no such mapping was possible.
 725 */
 726struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
 727					  const struct tb_port *port)
 728{
 729	int usb4_idx = usb4_port_idx(sw, port);
 730	struct tb_port *p;
 731	int pcie_idx = 0;
 732
 733	/* Find PCIe down port matching usb4_port */
 734	tb_switch_for_each_port(sw, p) {
 735		if (!tb_port_is_pcie_down(p))
 736			continue;
 737
 738		if (pcie_idx == usb4_idx)
 739			return p;
 740
 741		pcie_idx++;
 742	}
 743
 744	return NULL;
 745}
 746
 747/**
 748 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 749 * @sw: USB4 router
 750 * @port: USB4 port
 751 *
 752 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 753 * downstream adapters where the USB 3.x topology is extended. This
 754 * function returns the corresponding downstream USB 3.x adapter or
 755 * %NULL if no such mapping was possible.
 756 */
 757struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
 758					  const struct tb_port *port)
 759{
 760	int usb4_idx = usb4_port_idx(sw, port);
 761	struct tb_port *p;
 762	int usb_idx = 0;
 763
 764	/* Find USB3 down port matching usb4_port */
 765	tb_switch_for_each_port(sw, p) {
 766		if (!tb_port_is_usb3_down(p))
 767			continue;
 768
 769		if (usb_idx == usb4_idx)
 770			return p;
 771
 772		usb_idx++;
 773	}
 774
 775	return NULL;
 776}
 777
 778/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 779 * usb4_port_unlock() - Unlock USB4 downstream port
 780 * @port: USB4 port to unlock
 781 *
 782 * Unlocks USB4 downstream port so that the connection manager can
 783 * access the router below this port.
 784 */
 785int usb4_port_unlock(struct tb_port *port)
 786{
 787	int ret;
 788	u32 val;
 789
 790	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
 791	if (ret)
 792		return ret;
 793
 794	val &= ~ADP_CS_4_LCK;
 795	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
 796}
 797
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 798static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
 799				  u32 value, int timeout_msec)
 800{
 801	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
 802
 803	do {
 804		u32 val;
 805		int ret;
 806
 807		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
 808		if (ret)
 809			return ret;
 810
 811		if ((val & bit) == value)
 812			return 0;
 813
 814		usleep_range(50, 100);
 815	} while (ktime_before(ktime_get(), timeout));
 816
 817	return -ETIMEDOUT;
 818}
 819
 820static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
 821{
 822	if (dwords > USB4_DATA_DWORDS)
 823		return -EINVAL;
 824
 825	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
 826			    dwords);
 827}
 828
 829static int usb4_port_write_data(struct tb_port *port, const void *data,
 830				size_t dwords)
 831{
 832	if (dwords > USB4_DATA_DWORDS)
 833		return -EINVAL;
 834
 835	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
 836			     dwords);
 837}
 838
/*
 * Perform a sideband register read from @target (router, link partner
 * or the retimer at @index). Programs the transaction in PORT_CS_1,
 * polls the pending (PND) bit to clear and checks the result flags:
 * NR set -> %-ENODEV (no response), RC set -> %-EIO. On success @size
 * bytes are read into @buf via the port data area (when @buf is
 * non-NULL).
 */
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
			     u8 index, u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Build the transaction descriptor: register, length, target */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait up to 500 ms for the transaction to complete */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
 878
/*
 * Perform a sideband register write to @target (router, link partner
 * or the retimer at @index). Optionally stages @size bytes from @buf
 * into the port data area first, then programs the write transaction
 * in PORT_CS_1, polls PND to clear and checks the result flags:
 * NR set -> %-ENODEV (no response), RC set -> %-EIO.
 */
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
			      u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	/* Build the transaction descriptor: register, length, write, target */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Wait up to 500 ms for the transaction to complete */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
 925
/*
 * Run a sideband operation against @target and wait for it to finish.
 *
 * Writes @opcode to the sideband USB4_SB_OPCODE register and then polls
 * the same register until the target either clears it (%0, success),
 * reports an error (%-EAGAIN) or not supported (%-EOPNOTSUPP). Any
 * other value than the original opcode is treated as %-EIO. Gives up
 * after @timeout_msec with %-ETIMEDOUT.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		switch (val) {
		case 0:
			return 0;

		case USB4_SB_OPCODE_ERR:
			return -EAGAIN;

		case USB4_SB_OPCODE_ONS:
			return -EOPNOTSUPP;

		default:
			if (val != opcode)
				return -EIO;
			break;
		}
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
 967
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/**
 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
 * @port: USB4 port
 *
 * This forces the USB4 port to send broadcast RT transaction which
 * makes the retimers on the link to assign index to themselves. Returns
 * %0 in case of success and negative errno if there was an error.
 */
int usb4_port_enumerate_retimers(struct tb_port *port)
{
	u32 val;

	/* Fire-and-forget: the opcode is written without polling completion */
	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
 984
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Run sideband operation @opcode against the retimer at @index */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}
 992
/**
 * usb4_port_retimer_read() - Read from retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to read
 * @buf: Data from @reg is stored here
 * @size: Number of bytes to read
 *
 * Function reads retimer sideband registers starting from @reg. The
 * retimer is connected to @port at @index. The access goes through the
 * port sideband mechanism (see usb4_port_sb_read()). Returns %0 in
 * case of success, and read data is copied to @buf. If there is no
 * retimer present at given @index returns %-ENODEV. In any other
 * failure returns negative errno.
 */
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
			   u8 size)
{
	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				 size);
}
1013
/**
 * usb4_port_retimer_write() - Write to retimer sideband registers
 * @port: USB4 port
 * @index: Retimer index
 * @reg: Sideband register to write
 * @buf: Data that is written starting from @reg
 * @size: Number of bytes to write
 *
 * Writes retimer sideband registers starting from @reg. The retimer is
 * connected to @port at @index. The access goes through the port
 * sideband mechanism (see usb4_port_sb_write()). Returns %0 in case of
 * success. If there is no retimer present at given @index returns
 * %-ENODEV. In any other failure returns negative errno.
 */
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
			    const void *buf, u8 size)
{
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
				  size);
}
1033
1034/**
1035 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1036 * @port: USB4 port
1037 * @index: Retimer index
1038 *
1039 * If the retimer at @index is last one (connected directly to the
1040 * Type-C port) this function returns %1. If it is not returns %0. If
1041 * the retimer is not present returns %-ENODEV. Otherwise returns
1042 * negative errno.
1043 */
1044int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1045{
1046	u32 metadata;
1047	int ret;
1048
1049	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1050				   500);
1051	if (ret)
1052		return ret;
1053
1054	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1055				     sizeof(metadata));
1056	return ret ? ret : metadata & 1;
1057}
1058
1059/**
1060 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1061 * @port: USB4 port
1062 * @index: Retimer index
1063 *
1064 * Reads NVM sector size (in bytes) of a retimer at @index. This
1065 * operation can be used to determine whether the retimer supports NVM
1066 * upgrade for example. Returns sector size in bytes or negative errno
1067 * in case of error. Specifically returns %-ENODEV if there is no
1068 * retimer at @index.
1069 */
1070int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1071{
1072	u32 metadata;
1073	int ret;
1074
1075	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1076				   500);
1077	if (ret)
1078		return ret;
1079
1080	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1081				     sizeof(metadata));
1082	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1083}
1084
/*
 * Program the NVM write offset of the retimer at @index. @address is a
 * byte address; it is converted to a dword address before being written
 * to the sideband metadata register.
 */
static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
					    unsigned int address)
{
	u32 metadata, dwaddress;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		  USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
				    500);
}
1103
/* Carries port + retimer index to the NVM block read/write callbacks */
struct retimer_info {
	struct tb_port *port;
	u8 index;
};
1108
/*
 * Write one block (@dwords dwords) from @buf to the retimer sideband
 * data registers and trigger the NVM block write operation. Called via
 * usb4_do_write_data() with @data pointing to a struct retimer_info.
 */
static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
						  size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	int ret;

	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
				      buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(port, index,
			USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}
1126
/**
 * usb4_port_retimer_nvm_write() - Write to retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: Byte address where to start the write
 * @buf: Data to write
 * @size: Size in bytes how much to write
 *
 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
 * upgrade. Returns %0 if the data was written successfully and negative
 * errno in case of failure. Specifically returns %-ENODEV if there is
 * no retimer at @index.
 */
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
				const void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };
	int ret;

	/* Set write offset first, then stream the data block by block */
	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
	if (ret)
		return ret;

	return usb4_do_write_data(address, buf, size,
			usb4_port_retimer_nvm_write_next_block, &info);
}
1153
/**
 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 *
 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
 * this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set so one needs to call usb4_port_enumerate_retimers() to
 * force index to be assigned.
 *
 * Returns the result of the raw sideband opcode write (%0 in success,
 * negative errno otherwise).
 */
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
	u32 val;

	/*
	 * We need to use the raw operation here because once the
	 * authentication completes the retimer index is not set anymore
	 * so we do not get back the status now.
	 */
	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
1178
/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	/* The opcode register holds the outcome of the authentication */
	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
				     sizeof(val));
	if (ret)
		return ret;

	switch (val) {
	case 0:
		/* Opcode cleared means the operation completed successfully */
		*status = 0;
		return 0;

	case USB4_SB_OPCODE_ERR:
		/* On error the detailed status code is in the metadata */
		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
					     &metadata, sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	case USB4_SB_OPCODE_ONS:
		/* Operation not supported by the retimer */
		return -EOPNOTSUPP;

	default:
		return -EIO;
	}
}
1224
/*
 * Read one block of up to USB4_DATA_DWORDS dwords from retimer NVM at
 * dword address @dwaddress into @buf. Called via usb4_do_read_data()
 * with @data pointing to a struct retimer_info.
 */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	/* Length field of 0 means a full USB4_DATA_DWORDS block */
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
				      sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
				      dwords * 4);
}
1250
1251/**
1252 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1253 * @port: USB4 port
1254 * @index: Retimer index
1255 * @address: NVM address (in bytes) to start reading
1256 * @buf: Data read from NVM is stored here
1257 * @size: Number of bytes to read
1258 *
1259 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1260 * read was successful and negative errno in case of failure.
1261 * Specifically returns %-ENODEV if there is no retimer at @index.
1262 */
1263int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1264			       unsigned int address, void *buf, size_t size)
1265{
1266	struct retimer_info info = { .port = port, .index = index };
1267
1268	return usb4_do_read_data(address, buf, size,
1269			usb4_port_retimer_nvm_read_block, &info);
1270}
1271
/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	/* Only 10G and 20G rates are encoded in the MSLR field */
	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
}
1295
/**
 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
 * @port: USB3 adapter port
 *
 * Return actual established link rate of a USB3 adapter in Mb/s. If the
 * link is not up returns %0 and negative errno in case of failure.
 */
int usb4_usb3_port_actual_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	/* ULV bit clear means the USB3 link is not up */
	if (!(val & ADP_USB3_CS_4_ULV))
		return 0;

	lr = val & ADP_USB3_CS_4_ALR_MASK;
	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
}
1322
/*
 * Set or clear the Connection Manager Request (CMR) bit of a host
 * router USB3 downstream adapter and wait until the adapter
 * acknowledges it (HCA bit). Only valid for the host router (route 0).
 */
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500);
}
1356
/* Assert CM request (and wait for hardware acknowledgment) */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
1361
/* De-assert CM request (and wait for hardware acknowledgment) */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
1366
1367static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1368{
1369	unsigned long uframes;
1370
1371	uframes = bw * 512UL << scale;
1372	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1373}
1374
1375static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1376{
1377	unsigned long uframes;
1378
1379	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1380	uframes = ((unsigned long)mbps * 1000 *  1000) / 8000;
1381	return DIV_ROUND_UP(uframes, 512UL << scale);
1382}
1383
/*
 * Read currently allocated upstream/downstream USB3 bandwidth of the
 * adapter and convert it to Mb/s using the adapter's scale value.
 */
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/* Scale (granularity exponent) comes from ADP_USB3_CS_3 */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
1411
/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	/* Registers must be read under an active CM request */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}
1437
/*
 * Read currently consumed upstream/downstream USB3 bandwidth of the
 * adapter and convert it to Mb/s using the adapter's scale value.
 */
static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	/* Scale (granularity exponent) comes from ADP_USB3_CS_3 */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
1465
/*
 * Program new upstream/downstream allocated bandwidth (in Mb/s) into
 * the adapter, converting to register units with the current scale.
 */
static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret;

	/* Read the used scale, hardware default is 0 */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;
	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	/* Read-modify-write: only replace the two bandwidth fields */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}
1495
/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

	/* Clear the CM request on both success and error paths */
err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
1543
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 1000)
		consumed_up = 1000;
	if (consumed_down < 1000)
		consumed_down = 1000;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

	/* Clear the CM request on both success and error paths */
err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * USB4 specific functionality
   4 *
   5 * Copyright (C) 2019, Intel Corporation
   6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
   7 *	    Rajmohan Mani <rajmohan.mani@intel.com>
   8 */
   9
  10#include <linux/delay.h>
  11#include <linux/ktime.h>
  12
  13#include "sb_regs.h"
  14#include "tb.h"
  15
 
  16#define USB4_DATA_RETRIES		3
  17
 
 
 
 
 
 
 
 
 
 
 
 
  18enum usb4_sb_target {
  19	USB4_SB_TARGET_ROUTER,
  20	USB4_SB_TARGET_PARTNER,
  21	USB4_SB_TARGET_RETIMER,
  22};
  23
  24#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
  25#define USB4_NVM_READ_OFFSET_SHIFT	2
  26#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
  27#define USB4_NVM_READ_LENGTH_SHIFT	24
  28
  29#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
  30#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT
  31
  32#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
  33#define USB4_DROM_ADDRESS_SHIFT		2
  34#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
  35#define USB4_DROM_SIZE_SHIFT		15
  36
  37#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)
  38
  39#define USB4_BA_LENGTH_MASK		GENMASK(7, 0)
  40#define USB4_BA_INDEX_MASK		GENMASK(15, 0)
  41
  42enum usb4_ba_index {
  43	USB4_BA_MAX_USB3 = 0x1,
  44	USB4_BA_MIN_DP_AUX = 0x2,
  45	USB4_BA_MIN_DP_MAIN = 0x3,
  46	USB4_BA_MAX_PCIE = 0x4,
  47	USB4_BA_MAX_HI = 0x5,
  48};
  49
  50#define USB4_BA_VALUE_MASK		GENMASK(31, 16)
  51#define USB4_BA_VALUE_SHIFT		16
 
  52
/*
 * Run a native USB4 router operation: write optional metadata and TX
 * data, kick the operation via ROUTER_CS_26 and wait for the OV bit to
 * clear, then collect status, metadata and RX data. Returns %0 in
 * success, %-EOPNOTSUPP if the router does not support the operation
 * and negative errno otherwise.
 */
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	/* Setting OV starts the operation */
	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Hardware clears OV when the operation completes */
	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}
 107
/*
 * Dispatch a USB4 router operation either through the connection
 * manager proxy callback (if implemented) or natively through the
 * router config space registers.
 */
static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;

	if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS)
		return -EINVAL;

	/*
	 * If the connection manager implementation provides USB4 router
	 * operation proxy callback, call it here instead of running the
	 * operation natively.
	 */
	if (cm_ops->usb4_switch_op) {
		int ret;

		ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
					     tx_data, tx_dwords, rx_data,
					     rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;

		/*
		 * If the proxy was not supported then run the native
		 * router operation instead.
		 */
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}
 140
/* Router operation with no TX/RX data payload */
static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 146
/* Router operation that carries TX and/or RX data payload */
static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}
 155
/*
 * Check which wake sources (PCIe/USB3 on the router, USB4/connect/
 * disconnect on the ports) triggered and report them to the PM core
 * via pm_wakeup_event(). Does nothing if the device may not wake up.
 */
static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	/* Router-level PCIe/USB3 wakes only exist on device routers */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check for any downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}
 208
 209static bool link_is_usb4(struct tb_port *port)
 210{
 211	u32 val;
 212
 213	if (!port->cap_usb4)
 214		return false;
 215
 216	if (tb_port_read(port, &val, TB_CFG_PORT,
 217			 port->cap_usb4 + PORT_CS_18, 1))
 218		return false;
 219
 220	return !(val & PORT_CS_18_TCM);
 221}
 222
/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	/* Nothing further to configure for the host router */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Prefer USB3 tunneling over the internal xHCI when possible */
	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 dowstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/* Wait for the router to acknowledge the configuration */
	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
				      ROUTER_CS_6_CR, 50);
}
 300
/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space (two dwords starting
 * at ROUTER_CS_7). Returns %0 in success and negative errno otherwise.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}
 312
/*
 * Read one block of @dwords dwords from router DROM at dword address
 * @dwaddress into @buf. Used as a tb_nvm_read_data() callback with
 * @data pointing to the router.
 */
static int usb4_switch_drom_read_block(void *data,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	/* Non-zero operation status means the read failed */
	return status ? -EIO : 0;
}
 333
/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_drom_read_block, sw);
}
 351
 352/**
 353 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 354 * @sw: USB4 router
 355 *
 356 * Checks whether conditions are met so that lane bonding can be
 357 * established with the upstream router. Call only for device routers.
 358 */
 359bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
 360{
 361	struct tb_port *up;
 362	int ret;
 363	u32 val;
 364
 365	up = tb_upstream_port(sw);
 366	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
 367	if (ret)
 368		return false;
 
 
 
 
 
 369
 370	return !!(val & PORT_CS_18_BE);
 
 371}
 372
 373/**
 374 * usb4_switch_set_wake() - Enabled/disable wake
 375 * @sw: USB4 router
 376 * @flags: Wakeup flags (%0 to disable)
 377 *
 378 * Enables/disables router to wake up from sleep.
 
 379 */
 380int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
 381{
 382	struct usb4_port *usb4;
 383	struct tb_port *port;
 384	u64 route = tb_route(sw);
 385	u32 val;
 386	int ret;
 387
 388	/*
 389	 * Enable wakes coming from all USB4 downstream ports (from
 390	 * child routers). For device routers do this also for the
 391	 * upstream USB4 port.
 392	 */
 393	tb_switch_for_each_port(sw, port) {
 394		if (!tb_port_is_null(port))
 395			continue;
 396		if (!route && tb_is_upstream_port(port))
 397			continue;
 398		if (!port->cap_usb4)
 399			continue;
 400
 401		ret = tb_port_read(port, &val, TB_CFG_PORT,
 402				   port->cap_usb4 + PORT_CS_19, 1);
 403		if (ret)
 404			return ret;
 405
 406		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
 
 
 
 
 
 
 
 
 407
 408		if (tb_is_upstream_port(port)) {
 409			val |= PORT_CS_19_WOU4;
 410		} else {
 411			bool configured = val & PORT_CS_19_PC;
 412			usb4 = port->usb4;
 413
 414			if (((flags & TB_WAKE_ON_CONNECT) |
 415			      device_may_wakeup(&usb4->dev)) && !configured)
 416				val |= PORT_CS_19_WOC;
 417			if (((flags & TB_WAKE_ON_DISCONNECT) |
 418			      device_may_wakeup(&usb4->dev)) && configured)
 419				val |= PORT_CS_19_WOD;
 420			if ((flags & TB_WAKE_ON_USB4) && configured)
 421				val |= PORT_CS_19_WOU4;
 422		}
 423
 424		ret = tb_port_write(port, &val, TB_CFG_PORT,
 425				    port->cap_usb4 + PORT_CS_19, 1);
 426		if (ret)
 427			return ret;
 428	}
 429
 430	/*
 431	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
 432	 * needed for device routers.
 433	 */
 434	if (route) {
 435		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
 436		if (ret)
 437			return ret;
 
 
 
 
 438
 439		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
 440		if (flags & TB_WAKE_ON_USB3)
 441			val |= ROUTER_CS_5_WOU;
 442		if (flags & TB_WAKE_ON_PCIE)
 443			val |= ROUTER_CS_5_WOP;
 444		if (flags & TB_WAKE_ON_DP)
 445			val |= ROUTER_CS_5_WOD;
 446
 447		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
 448		if (ret)
 449			return ret;
 450	}
 451
 452	return 0;
 453}
 454
/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
				      ROUTER_CS_6_SLPR, 500);
}
 481
/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
			     &status);
	if (ret)
		return ret;

	/* Status 0x2 means the operation itself is not supported */
	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	/* Sector size is in the low 24 bits of the metadata */
	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
 506
/*
 * Read one block of @dwords dwords from router NVM at dword address
 * @dwaddress into @buf. Used as a tb_nvm_read_data() callback with
 * @data pointing to the router.
 */
static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		   USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;

	/* Non-zero operation status means the read failed */
	return status ? -EIO : 0;
}
 527
/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}
 544
/**
 * usb4_switch_nvm_set_offset() - Set NVM write offset
 * @sw: USB4 router
 * @address: Start offset
 *
 * Explicitly sets NVM write offset. Normally when writing to NVM this
 * is done automatically by usb4_switch_nvm_write().
 *
 * Returns %0 in success and negative errno if there was a failure.
 */
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	/* The offset is encoded as a dword address in the metadata */
	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
			     &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}
 572
/*
 * Write one block of @dwords dwords from @buf to router NVM. Used as a
 * tb_nvm_write_data() callback with @data pointing to the router; the
 * offset was set beforehand via usb4_switch_nvm_set_offset().
 */
static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;

	/* Non-zero operation status means the write failed */
	return status ? -EIO : 0;
}
 587
 588/**
 589 * usb4_switch_nvm_write() - Write to the router NVM
 590 * @sw: USB4 router
 591 * @address: Start address where to write in bytes
 592 * @buf: Pointer to the data to write
 593 * @size: Size of @buf in bytes
 594 *
 595 * Writes @buf to the router NVM using USB4 router operations. If NVM
 596 * write is not supported returns %-EOPNOTSUPP.
 597 */
 598int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
 599			  const void *buf, size_t size)
 600{
 601	int ret;
 602
 603	ret = usb4_switch_nvm_set_offset(sw, address);
 604	if (ret)
 605		return ret;
 606
 607	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
 608				 usb4_switch_nvm_write_next_block, sw);
 609}
 610
 611/**
 612 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 613 * @sw: USB4 router
 614 *
 615 * After the new NVM has been written via usb4_switch_nvm_write(), this
 616 * function triggers NVM authentication process. The router gets power
 617 * cycled and if the authentication is successful the new NVM starts
 618 * running. In case of failure returns negative errno.
 619 *
 620 * The caller should call usb4_switch_nvm_authenticate_status() to read
 621 * the status of the authentication after power cycle. It should be the
 622 * first router operation to avoid the status being lost.
 623 */
 624int usb4_switch_nvm_authenticate(struct tb_switch *sw)
 625{
 
 626	int ret;
 627
 628	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
 629	switch (ret) {
 630	/*
 631	 * The router is power cycled once NVM_AUTH is started so it is
 632	 * expected to get any of the following errors back.
 633	 */
 634	case -EACCES:
 635	case -ENOTCONN:
 636	case -ETIMEDOUT:
 637		return 0;
 638
 639	default:
 640		return ret;
 641	}
 642}
 643
 644/**
 645 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 646 * @sw: USB4 router
 647 * @status: Status code of the operation
 648 *
 649 * The function checks if there is status available from the last NVM
 650 * authenticate router operation. If there is status then %0 is returned
 651 * and the status code is placed in @status. Returns negative errno in case
 652 * of failure.
 653 *
 654 * Must be called before any other router operation.
 655 */
 656int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
 657{
 658	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
 659	u16 opcode;
 660	u32 val;
 661	int ret;
 662
 663	if (cm_ops->usb4_switch_nvm_authenticate_status) {
 664		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
 665		if (ret != -EOPNOTSUPP)
 666			return ret;
 667	}
 668
 669	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
 670	if (ret)
 671		return ret;
 672
 673	/* Check that the opcode is correct */
 674	opcode = val & ROUTER_CS_26_OPCODE_MASK;
 675	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
 676		if (val & ROUTER_CS_26_OV)
 677			return -EBUSY;
 678		if (val & ROUTER_CS_26_ONS)
 679			return -EOPNOTSUPP;
 680
 681		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
 682			ROUTER_CS_26_STATUS_SHIFT;
 683	} else {
 684		*status = 0;
 685	}
 686
 687	return 0;
 688}
 689
 690/**
 691 * usb4_switch_credits_init() - Read buffer allocation parameters
 692 * @sw: USB4 router
 693 *
 694 * Reads @sw buffer allocation parameters and initializes @sw buffer
 695 * allocation fields accordingly. Specifically @sw->credits_allocation
 696 * is set to %true if these parameters can be used in tunneling.
 697 *
 698 * Returns %0 on success and negative errno otherwise.
 699 */
 700int usb4_switch_credits_init(struct tb_switch *sw)
 701{
 702	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
 703	int ret, length, i, nports;
 704	const struct tb_port *port;
 705	u32 data[NVM_DATA_DWORDS];
 706	u32 metadata = 0;
 707	u8 status = 0;
 708
 709	memset(data, 0, sizeof(data));
 710	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
 711				  &status, NULL, 0, data, ARRAY_SIZE(data));
 712	if (ret)
 713		return ret;
 714	if (status)
 715		return -EIO;
 716
 717	length = metadata & USB4_BA_LENGTH_MASK;
 718	if (WARN_ON(length > ARRAY_SIZE(data)))
 719		return -EMSGSIZE;
 720
 721	max_usb3 = -1;
 722	min_dp_aux = -1;
 723	min_dp_main = -1;
 724	max_pcie = -1;
 725	max_dma = -1;
 726
 727	tb_sw_dbg(sw, "credit allocation parameters:\n");
 728
 729	for (i = 0; i < length; i++) {
 730		u16 index, value;
 731
 732		index = data[i] & USB4_BA_INDEX_MASK;
 733		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
 734
 735		switch (index) {
 736		case USB4_BA_MAX_USB3:
 737			tb_sw_dbg(sw, " USB3: %u\n", value);
 738			max_usb3 = value;
 739			break;
 740		case USB4_BA_MIN_DP_AUX:
 741			tb_sw_dbg(sw, " DP AUX: %u\n", value);
 742			min_dp_aux = value;
 743			break;
 744		case USB4_BA_MIN_DP_MAIN:
 745			tb_sw_dbg(sw, " DP main: %u\n", value);
 746			min_dp_main = value;
 747			break;
 748		case USB4_BA_MAX_PCIE:
 749			tb_sw_dbg(sw, " PCIe: %u\n", value);
 750			max_pcie = value;
 751			break;
 752		case USB4_BA_MAX_HI:
 753			tb_sw_dbg(sw, " DMA: %u\n", value);
 754			max_dma = value;
 755			break;
 756		default:
 757			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
 758				  index);
 759			break;
 760		}
 761	}
 762
 763	/*
 764	 * Validate the buffer allocation preferences. If we find
 765	 * issues, log a warning and fall back using the hard-coded
 766	 * values.
 767	 */
 768
 769	/* Host router must report baMaxHI */
 770	if (!tb_route(sw) && max_dma < 0) {
 771		tb_sw_warn(sw, "host router is missing baMaxHI\n");
 772		goto err_invalid;
 773	}
 774
 775	nports = 0;
 776	tb_switch_for_each_port(sw, port) {
 777		if (tb_port_is_null(port))
 778			nports++;
 779	}
 780
 781	/* Must have DP buffer allocation (multiple USB4 ports) */
 782	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
 783		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
 784		goto err_invalid;
 785	}
 786
 787	tb_switch_for_each_port(sw, port) {
 788		if (tb_port_is_dpout(port) && min_dp_main < 0) {
 789			tb_sw_warn(sw, "missing baMinDPmain");
 790			goto err_invalid;
 791		}
 792		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
 793		    min_dp_aux < 0) {
 794			tb_sw_warn(sw, "missing baMinDPaux");
 795			goto err_invalid;
 796		}
 797		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
 798		    max_usb3 < 0) {
 799			tb_sw_warn(sw, "missing baMaxUSB3");
 800			goto err_invalid;
 801		}
 802		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
 803		    max_pcie < 0) {
 804			tb_sw_warn(sw, "missing baMaxPCIe");
 805			goto err_invalid;
 806		}
 807	}
 808
 809	/*
 810	 * Buffer allocation passed the validation so we can use it in
 811	 * path creation.
 812	 */
 813	sw->credit_allocation = true;
 814	if (max_usb3 > 0)
 815		sw->max_usb3_credits = max_usb3;
 816	if (min_dp_aux > 0)
 817		sw->min_dp_aux_credits = min_dp_aux;
 818	if (min_dp_main > 0)
 819		sw->min_dp_main_credits = min_dp_main;
 820	if (max_pcie > 0)
 821		sw->max_pcie_credits = max_pcie;
 822	if (max_dma > 0)
 823		sw->max_dma_credits = max_dma;
 824
 825	return 0;
 826
 827err_invalid:
 828	return -EINVAL;
 829}
 830
 831/**
 832 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 833 * @sw: USB4 router
 834 * @in: DP IN adapter
 835 *
 836 * For DP tunneling this function can be used to query availability of
 837 * DP IN resource. Returns true if the resource is available for DP
 838 * tunneling, false otherwise.
 839 */
 840bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
 841{
 842	u32 metadata = in->port;
 843	u8 status;
 844	int ret;
 845
 846	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
 847			     &status);
 
 
 
 848	/*
 849	 * If DP resource allocation is not supported assume it is
 850	 * always available.
 851	 */
 852	if (ret == -EOPNOTSUPP)
 853		return true;
 854	else if (ret)
 855		return false;
 856
 857	return !status;
 858}
 859
 860/**
 861 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 862 * @sw: USB4 router
 863 * @in: DP IN adapter
 864 *
 865 * Allocates DP IN resource for DP tunneling using USB4 router
 866 * operations. If the resource was allocated returns %0. Otherwise
 867 * returns negative errno, in particular %-EBUSY if the resource is
 868 * already allocated.
 869 */
 870int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 871{
 872	u32 metadata = in->port;
 873	u8 status;
 874	int ret;
 875
 876	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
 877			     &status);
 
 
 
 878	if (ret == -EOPNOTSUPP)
 879		return 0;
 880	else if (ret)
 881		return ret;
 882
 883	return status ? -EBUSY : 0;
 884}
 885
 886/**
 887 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 888 * @sw: USB4 router
 889 * @in: DP IN adapter
 890 *
 891 * Releases the previously allocated DP IN resource.
 892 */
 893int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
 894{
 895	u32 metadata = in->port;
 896	u8 status;
 897	int ret;
 898
 899	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
 900			     &status);
 
 
 
 901	if (ret == -EOPNOTSUPP)
 902		return 0;
 903	else if (ret)
 904		return ret;
 905
 906	return status ? -EIO : 0;
 907}
 908
 909static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
 910{
 911	struct tb_port *p;
 912	int usb4_idx = 0;
 913
 914	/* Assume port is primary */
 915	tb_switch_for_each_port(sw, p) {
 916		if (!tb_port_is_null(p))
 917			continue;
 918		if (tb_is_upstream_port(p))
 919			continue;
 920		if (!p->link_nr) {
 921			if (p == port)
 922				break;
 923			usb4_idx++;
 924		}
 925	}
 926
 927	return usb4_idx;
 928}
 929
 930/**
 931 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
 932 * @sw: USB4 router
 933 * @port: USB4 port
 934 *
 935 * USB4 routers have direct mapping between USB4 ports and PCIe
 936 * downstream adapters where the PCIe topology is extended. This
 937 * function returns the corresponding downstream PCIe adapter or %NULL
 938 * if no such mapping was possible.
 939 */
 940struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
 941					  const struct tb_port *port)
 942{
 943	int usb4_idx = usb4_port_idx(sw, port);
 944	struct tb_port *p;
 945	int pcie_idx = 0;
 946
 947	/* Find PCIe down port matching usb4_port */
 948	tb_switch_for_each_port(sw, p) {
 949		if (!tb_port_is_pcie_down(p))
 950			continue;
 951
 952		if (pcie_idx == usb4_idx)
 953			return p;
 954
 955		pcie_idx++;
 956	}
 957
 958	return NULL;
 959}
 960
 961/**
 962 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
 963 * @sw: USB4 router
 964 * @port: USB4 port
 965 *
 966 * USB4 routers have direct mapping between USB4 ports and USB 3.x
 967 * downstream adapters where the USB 3.x topology is extended. This
 968 * function returns the corresponding downstream USB 3.x adapter or
 969 * %NULL if no such mapping was possible.
 970 */
 971struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
 972					  const struct tb_port *port)
 973{
 974	int usb4_idx = usb4_port_idx(sw, port);
 975	struct tb_port *p;
 976	int usb_idx = 0;
 977
 978	/* Find USB3 down port matching usb4_port */
 979	tb_switch_for_each_port(sw, p) {
 980		if (!tb_port_is_usb3_down(p))
 981			continue;
 982
 983		if (usb_idx == usb4_idx)
 984			return p;
 985
 986		usb_idx++;
 987	}
 988
 989	return NULL;
 990}
 991
 992/**
 993 * usb4_switch_add_ports() - Add USB4 ports for this router
 994 * @sw: USB4 router
 995 *
 996 * For USB4 router finds all USB4 ports and registers devices for each.
 997 * Can be called to any router.
 998 *
 999 * Return %0 in case of success and negative errno in case of failure.
1000 */
1001int usb4_switch_add_ports(struct tb_switch *sw)
1002{
1003	struct tb_port *port;
1004
1005	if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
1006		return 0;
1007
1008	tb_switch_for_each_port(sw, port) {
1009		struct usb4_port *usb4;
1010
1011		if (!tb_port_is_null(port))
1012			continue;
1013		if (!port->cap_usb4)
1014			continue;
1015
1016		usb4 = usb4_port_device_add(port);
1017		if (IS_ERR(usb4)) {
1018			usb4_switch_remove_ports(sw);
1019			return PTR_ERR(usb4);
1020		}
1021
1022		port->usb4 = usb4;
1023	}
1024
1025	return 0;
1026}
1027
1028/**
1029 * usb4_switch_remove_ports() - Removes USB4 ports from this router
1030 * @sw: USB4 router
1031 *
1032 * Unregisters previously registered USB4 ports.
1033 */
1034void usb4_switch_remove_ports(struct tb_switch *sw)
1035{
1036	struct tb_port *port;
1037
1038	tb_switch_for_each_port(sw, port) {
1039		if (port->usb4) {
1040			usb4_port_device_remove(port->usb4);
1041			port->usb4 = NULL;
1042		}
1043	}
1044}
1045
1046/**
1047 * usb4_port_unlock() - Unlock USB4 downstream port
1048 * @port: USB4 port to unlock
1049 *
1050 * Unlocks USB4 downstream port so that the connection manager can
1051 * access the router below this port.
1052 */
1053int usb4_port_unlock(struct tb_port *port)
1054{
1055	int ret;
1056	u32 val;
1057
1058	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
1059	if (ret)
1060		return ret;
1061
1062	val &= ~ADP_CS_4_LCK;
1063	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
1064}
1065
1066/**
1067 * usb4_port_hotplug_enable() - Enables hotplug for a port
1068 * @port: USB4 port to operate on
1069 *
1070 * Enables hot plug events on a given port. This is only intended
1071 * to be used on lane, DP-IN, and DP-OUT adapters.
1072 */
1073int usb4_port_hotplug_enable(struct tb_port *port)
1074{
1075	int ret;
1076	u32 val;
1077
1078	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
1079	if (ret)
1080		return ret;
1081
1082	val &= ~ADP_CS_5_DHP;
1083	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
1084}
1085
1086static int usb4_port_set_configured(struct tb_port *port, bool configured)
1087{
1088	int ret;
1089	u32 val;
1090
1091	if (!port->cap_usb4)
1092		return -EINVAL;
1093
1094	ret = tb_port_read(port, &val, TB_CFG_PORT,
1095			   port->cap_usb4 + PORT_CS_19, 1);
1096	if (ret)
1097		return ret;
1098
1099	if (configured)
1100		val |= PORT_CS_19_PC;
1101	else
1102		val &= ~PORT_CS_19_PC;
1103
1104	return tb_port_write(port, &val, TB_CFG_PORT,
1105			     port->cap_usb4 + PORT_CS_19, 1);
1106}
1107
1108/**
1109 * usb4_port_configure() - Set USB4 port configured
1110 * @port: USB4 router
1111 *
1112 * Sets the USB4 link to be configured for power management purposes.
1113 */
1114int usb4_port_configure(struct tb_port *port)
1115{
1116	return usb4_port_set_configured(port, true);
1117}
1118
1119/**
1120 * usb4_port_unconfigure() - Set USB4 port unconfigured
1121 * @port: USB4 router
1122 *
1123 * Sets the USB4 link to be unconfigured for power management purposes.
1124 */
1125void usb4_port_unconfigure(struct tb_port *port)
1126{
1127	usb4_port_set_configured(port, false);
1128}
1129
1130static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
1131{
1132	int ret;
1133	u32 val;
1134
1135	if (!port->cap_usb4)
1136		return -EINVAL;
1137
1138	ret = tb_port_read(port, &val, TB_CFG_PORT,
1139			   port->cap_usb4 + PORT_CS_19, 1);
1140	if (ret)
1141		return ret;
1142
1143	if (configured)
1144		val |= PORT_CS_19_PID;
1145	else
1146		val &= ~PORT_CS_19_PID;
1147
1148	return tb_port_write(port, &val, TB_CFG_PORT,
1149			     port->cap_usb4 + PORT_CS_19, 1);
1150}
1151
1152/**
1153 * usb4_port_configure_xdomain() - Configure port for XDomain
1154 * @port: USB4 port connected to another host
1155 * @xd: XDomain that is connected to the port
1156 *
1157 * Marks the USB4 port as being connected to another host and updates
1158 * the link type. Returns %0 in success and negative errno in failure.
1159 */
1160int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
1161{
1162	xd->link_usb4 = link_is_usb4(port);
1163	return usb4_set_xdomain_configured(port, true);
1164}
1165
1166/**
1167 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
1168 * @port: USB4 port that was connected to another host
1169 *
1170 * Clears USB4 port from being marked as XDomain.
1171 */
1172void usb4_port_unconfigure_xdomain(struct tb_port *port)
1173{
1174	usb4_set_xdomain_configured(port, false);
1175}
1176
1177static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
1178				  u32 value, int timeout_msec)
1179{
1180	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
1181
1182	do {
1183		u32 val;
1184		int ret;
1185
1186		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
1187		if (ret)
1188			return ret;
1189
1190		if ((val & bit) == value)
1191			return 0;
1192
1193		usleep_range(50, 100);
1194	} while (ktime_before(ktime_get(), timeout));
1195
1196	return -ETIMEDOUT;
1197}
1198
1199static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
1200{
1201	if (dwords > NVM_DATA_DWORDS)
1202		return -EINVAL;
1203
1204	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
1205			    dwords);
1206}
1207
1208static int usb4_port_write_data(struct tb_port *port, const void *data,
1209				size_t dwords)
1210{
1211	if (dwords > NVM_DATA_DWORDS)
1212		return -EINVAL;
1213
1214	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
1215			     dwords);
1216}
1217
1218static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
1219			     u8 index, u8 reg, void *buf, u8 size)
1220{
1221	size_t dwords = DIV_ROUND_UP(size, 4);
1222	int ret;
1223	u32 val;
1224
1225	if (!port->cap_usb4)
1226		return -EINVAL;
1227
1228	val = reg;
1229	val |= size << PORT_CS_1_LENGTH_SHIFT;
1230	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
1231	if (target == USB4_SB_TARGET_RETIMER)
1232		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
1233	val |= PORT_CS_1_PND;
1234
1235	ret = tb_port_write(port, &val, TB_CFG_PORT,
1236			    port->cap_usb4 + PORT_CS_1, 1);
1237	if (ret)
1238		return ret;
1239
1240	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1241				     PORT_CS_1_PND, 0, 500);
1242	if (ret)
1243		return ret;
1244
1245	ret = tb_port_read(port, &val, TB_CFG_PORT,
1246			    port->cap_usb4 + PORT_CS_1, 1);
1247	if (ret)
1248		return ret;
1249
1250	if (val & PORT_CS_1_NR)
1251		return -ENODEV;
1252	if (val & PORT_CS_1_RC)
1253		return -EIO;
1254
1255	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
1256}
1257
1258static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
1259			      u8 index, u8 reg, const void *buf, u8 size)
1260{
1261	size_t dwords = DIV_ROUND_UP(size, 4);
1262	int ret;
1263	u32 val;
1264
1265	if (!port->cap_usb4)
1266		return -EINVAL;
1267
1268	if (buf) {
1269		ret = usb4_port_write_data(port, buf, dwords);
1270		if (ret)
1271			return ret;
1272	}
1273
1274	val = reg;
1275	val |= size << PORT_CS_1_LENGTH_SHIFT;
1276	val |= PORT_CS_1_WNR_WRITE;
1277	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
1278	if (target == USB4_SB_TARGET_RETIMER)
1279		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
1280	val |= PORT_CS_1_PND;
1281
1282	ret = tb_port_write(port, &val, TB_CFG_PORT,
1283			    port->cap_usb4 + PORT_CS_1, 1);
1284	if (ret)
1285		return ret;
1286
1287	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
1288				     PORT_CS_1_PND, 0, 500);
1289	if (ret)
1290		return ret;
1291
1292	ret = tb_port_read(port, &val, TB_CFG_PORT,
1293			    port->cap_usb4 + PORT_CS_1, 1);
1294	if (ret)
1295		return ret;
1296
1297	if (val & PORT_CS_1_NR)
1298		return -ENODEV;
1299	if (val & PORT_CS_1_RC)
1300		return -EIO;
1301
1302	return 0;
1303}
1304
1305static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
1306			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
1307{
1308	ktime_t timeout;
1309	u32 val;
1310	int ret;
1311
1312	val = opcode;
1313	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
1314				 sizeof(val));
1315	if (ret)
1316		return ret;
1317
1318	timeout = ktime_add_ms(ktime_get(), timeout_msec);
1319
1320	do {
1321		/* Check results */
1322		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
1323					&val, sizeof(val));
1324		if (ret)
1325			return ret;
1326
1327		switch (val) {
1328		case 0:
1329			return 0;
1330
1331		case USB4_SB_OPCODE_ERR:
1332			return -EAGAIN;
1333
1334		case USB4_SB_OPCODE_ONS:
1335			return -EOPNOTSUPP;
1336
1337		default:
1338			if (val != opcode)
1339				return -EIO;
1340			break;
1341		}
1342	} while (ktime_before(ktime_get(), timeout));
1343
1344	return -ETIMEDOUT;
1345}
1346
1347static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
1348{
1349	u32 val = !offline;
1350	int ret;
1351
1352	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1353				  USB4_SB_METADATA, &val, sizeof(val));
1354	if (ret)
1355		return ret;
1356
1357	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
1358	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1359				  USB4_SB_OPCODE, &val, sizeof(val));
1360}
1361
1362/**
1363 * usb4_port_router_offline() - Put the USB4 port to offline mode
1364 * @port: USB4 port
1365 *
1366 * This function puts the USB4 port into offline mode. In this mode the
1367 * port does not react on hotplug events anymore. This needs to be
1368 * called before retimer access is done when the USB4 links is not up.
1369 *
1370 * Returns %0 in case of success and negative errno if there was an
1371 * error.
1372 */
1373int usb4_port_router_offline(struct tb_port *port)
1374{
1375	return usb4_port_set_router_offline(port, true);
1376}
1377
1378/**
1379 * usb4_port_router_online() - Put the USB4 port back to online
1380 * @port: USB4 port
1381 *
1382 * Makes the USB4 port functional again.
1383 */
1384int usb4_port_router_online(struct tb_port *port)
1385{
1386	return usb4_port_set_router_offline(port, false);
1387}
1388
1389/**
1390 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
1391 * @port: USB4 port
1392 *
1393 * This forces the USB4 port to send broadcast RT transaction which
1394 * makes the retimers on the link to assign index to themselves. Returns
1395 * %0 in case of success and negative errno if there was an error.
1396 */
1397int usb4_port_enumerate_retimers(struct tb_port *port)
1398{
1399	u32 val;
1400
1401	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
1402	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1403				  USB4_SB_OPCODE, &val, sizeof(val));
1404}
1405
1406/**
1407 * usb4_port_clx_supported() - Check if CLx is supported by the link
1408 * @port: Port to check for CLx support for
1409 *
1410 * PORT_CS_18_CPS bit reflects if the link supports CLx including
1411 * active cables (if connected on the link).
1412 */
1413bool usb4_port_clx_supported(struct tb_port *port)
1414{
1415	int ret;
1416	u32 val;
1417
1418	ret = tb_port_read(port, &val, TB_CFG_PORT,
1419			   port->cap_usb4 + PORT_CS_18, 1);
1420	if (ret)
1421		return false;
1422
1423	return !!(val & PORT_CS_18_CPS);
1424}
1425
1426/**
1427 * usb4_port_margining_caps() - Read USB4 port marginig capabilities
1428 * @port: USB4 port
1429 * @caps: Array with at least two elements to hold the results
1430 *
1431 * Reads the USB4 port lane margining capabilities into @caps.
1432 */
1433int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
1434{
1435	int ret;
1436
1437	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
1438			      USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
1439	if (ret)
1440		return ret;
1441
1442	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
1443				 USB4_SB_DATA, caps, sizeof(*caps) * 2);
1444}
1445
1446/**
1447 * usb4_port_hw_margin() - Run hardware lane margining on port
1448 * @port: USB4 port
1449 * @lanes: Which lanes to run (must match the port capabilities). Can be
1450 *	   %0, %1 or %7.
1451 * @ber_level: BER level contour value
1452 * @timing: Perform timing margining instead of voltage
1453 * @right_high: Use Right/high margin instead of left/low
1454 * @results: Array with at least two elements to hold the results
1455 *
1456 * Runs hardware lane margining on USB4 port and returns the result in
1457 * @results.
1458 */
1459int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
1460			unsigned int ber_level, bool timing, bool right_high,
1461			u32 *results)
1462{
1463	u32 val;
1464	int ret;
1465
1466	val = lanes;
1467	if (timing)
1468		val |= USB4_MARGIN_HW_TIME;
1469	if (right_high)
1470		val |= USB4_MARGIN_HW_RH;
1471	if (ber_level)
1472		val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
1473			USB4_MARGIN_HW_BER_MASK;
1474
1475	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1476				 USB4_SB_METADATA, &val, sizeof(val));
1477	if (ret)
1478		return ret;
1479
1480	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
1481			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
1482	if (ret)
1483		return ret;
1484
1485	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
1486				 USB4_SB_DATA, results, sizeof(*results) * 2);
1487}
1488
1489/**
1490 * usb4_port_sw_margin() - Run software lane margining on port
1491 * @port: USB4 port
1492 * @lanes: Which lanes to run (must match the port capabilities). Can be
1493 *	   %0, %1 or %7.
1494 * @timing: Perform timing margining instead of voltage
1495 * @right_high: Use Right/high margin instead of left/low
1496 * @counter: What to do with the error counter
1497 *
1498 * Runs software lane margining on USB4 port. Read back the error
1499 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
1500 * success and negative errno otherwise.
1501 */
1502int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
1503			bool right_high, u32 counter)
1504{
1505	u32 val;
1506	int ret;
1507
1508	val = lanes;
1509	if (timing)
1510		val |= USB4_MARGIN_SW_TIME;
1511	if (right_high)
1512		val |= USB4_MARGIN_SW_RH;
1513	val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
1514		USB4_MARGIN_SW_COUNTER_MASK;
1515
1516	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1517				 USB4_SB_METADATA, &val, sizeof(val));
1518	if (ret)
1519		return ret;
1520
1521	return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
1522			       USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
1523}
1524
1525/**
1526 * usb4_port_sw_margin_errors() - Read the software margining error counters
1527 * @port: USB4 port
1528 * @errors: Error metadata is copied here.
1529 *
1530 * This reads back the software margining error counters from the port.
1531 * Returns %0 in success and negative errno otherwise.
1532 */
1533int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
1534{
1535	int ret;
1536
1537	ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
1538			      USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
1539	if (ret)
1540		return ret;
1541
1542	return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
1543				 USB4_SB_METADATA, errors, sizeof(*errors));
1544}
1545
1546static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
1547				       enum usb4_sb_opcode opcode,
1548				       int timeout_msec)
1549{
1550	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
1551			       timeout_msec);
1552}
1553
1554/**
1555 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
1556 * @port: USB4 port
1557 * @index: Retimer index
1558 *
1559 * Enables sideband channel transations on SBTX. Can be used when USB4
1560 * link does not go up, for example if there is no device connected.
1561 */
1562int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
1563{
1564	int ret;
1565
1566	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
1567				   500);
1568
1569	if (ret != -ENODEV)
1570		return ret;
1571
1572	/*
1573	 * Per the USB4 retimer spec, the retimer is not required to
1574	 * send an RT (Retimer Transaction) response for the first
1575	 * SET_INBOUND_SBTX command
1576	 */
1577	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
1578				    500);
1579}
1580
1581/**
1582 * usb4_port_retimer_read() - Read from retimer sideband registers
1583 * @port: USB4 port
1584 * @index: Retimer index
1585 * @reg: Sideband register to read
1586 * @buf: Data from @reg is stored here
1587 * @size: Number of bytes to read
1588 *
1589 * Function reads retimer sideband registers starting from @reg. The
1590 * retimer is connected to @port at @index. Returns %0 in case of
1591 * success, and read data is copied to @buf. If there is no retimer
1592 * present at given @index returns %-ENODEV. In any other failure
1593 * returns negative errno.
1594 */
1595int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
1596			   u8 size)
1597{
1598	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1599				 size);
1600}
1601
1602/**
1603 * usb4_port_retimer_write() - Write to retimer sideband registers
1604 * @port: USB4 port
1605 * @index: Retimer index
1606 * @reg: Sideband register to write
1607 * @buf: Data that is written starting from @reg
1608 * @size: Number of bytes to write
1609 *
1610 * Writes retimer sideband registers starting from @reg. The retimer is
1611 * connected to @port at @index. Returns %0 in case of success. If there
1612 * is no retimer present at given @index returns %-ENODEV. In any other
1613 * failure returns negative errno.
1614 */
1615int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
1616			    const void *buf, u8 size)
1617{
1618	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
1619				  size);
1620}
1621
1622/**
1623 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1624 * @port: USB4 port
1625 * @index: Retimer index
1626 *
1627 * If the retimer at @index is last one (connected directly to the
1628 * Type-C port) this function returns %1. If it is not returns %0. If
1629 * the retimer is not present returns %-ENODEV. Otherwise returns
1630 * negative errno.
1631 */
1632int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1633{
1634	u32 metadata;
1635	int ret;
1636
1637	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1638				   500);
1639	if (ret)
1640		return ret;
1641
1642	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1643				     sizeof(metadata));
1644	return ret ? ret : metadata & 1;
1645}
1646
1647/**
1648 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1649 * @port: USB4 port
1650 * @index: Retimer index
1651 *
1652 * Reads NVM sector size (in bytes) of a retimer at @index. This
1653 * operation can be used to determine whether the retimer supports NVM
1654 * upgrade for example. Returns sector size in bytes or negative errno
1655 * in case of error. Specifically returns %-ENODEV if there is no
1656 * retimer at @index.
1657 */
1658int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1659{
1660	u32 metadata;
1661	int ret;
1662
1663	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1664				   500);
1665	if (ret)
1666		return ret;
1667
1668	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
1669				     sizeof(metadata));
1670	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1671}
1672
1673/**
1674 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
1675 * @port: USB4 port
1676 * @index: Retimer index
1677 * @address: Start offset
1678 *
1679 * Exlicitly sets NVM write offset. Normally when writing to NVM this is
1680 * done automatically by usb4_port_retimer_nvm_write().
1681 *
1682 * Returns %0 in success and negative errno if there was a failure.
1683 */
1684int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1685				     unsigned int address)
1686{
1687	u32 metadata, dwaddress;
1688	int ret;
1689
1690	dwaddress = address / 4;
1691	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
1692		  USB4_NVM_SET_OFFSET_MASK;
1693
1694	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1695				      sizeof(metadata));
1696	if (ret)
1697		return ret;
1698
1699	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
1700				    500);
1701}
1702
/* Callback context for the retimer NVM block read/write helpers below */
struct retimer_info {
	struct tb_port *port;	/* USB4 port the retimer is reachable through */
	u8 index;		/* Index of the retimer on that port */
};
1707
1708static int usb4_port_retimer_nvm_write_next_block(void *data,
1709	unsigned int dwaddress, const void *buf, size_t dwords)
1710
1711{
1712	const struct retimer_info *info = data;
1713	struct tb_port *port = info->port;
1714	u8 index = info->index;
1715	int ret;
1716
1717	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
1718				      buf, dwords * 4);
1719	if (ret)
1720		return ret;
1721
1722	return usb4_port_retimer_op(port, index,
1723			USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
1724}
1725
1726/**
1727 * usb4_port_retimer_nvm_write() - Write to retimer NVM
1728 * @port: USB4 port
1729 * @index: Retimer index
1730 * @address: Byte address where to start the write
1731 * @buf: Data to write
1732 * @size: Size in bytes how much to write
1733 *
1734 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
1735 * upgrade. Returns %0 if the data was written successfully and negative
1736 * errno in case of failure. Specifically returns %-ENODEV if there is
1737 * no retimer at @index.
1738 */
1739int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
1740				const void *buf, size_t size)
1741{
1742	struct retimer_info info = { .port = port, .index = index };
1743	int ret;
1744
1745	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
1746	if (ret)
1747		return ret;
1748
1749	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
1750				 usb4_port_retimer_nvm_write_next_block, &info);
1751}
1752
1753/**
1754 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
1755 * @port: USB4 port
1756 * @index: Retimer index
1757 *
1758 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
1759 * this function can be used to trigger the NVM upgrade process. If
1760 * successful the retimer restarts with the new NVM and may not have the
1761 * index set so one needs to call usb4_port_enumerate_retimers() to
1762 * force index to be assigned.
1763 */
1764int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
1765{
1766	u32 val;
1767
1768	/*
1769	 * We need to use the raw operation here because once the
1770	 * authentication completes the retimer index is not set anymore
1771	 * so we do not get back the status now.
1772	 */
1773	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
1774	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
1775				  USB4_SB_OPCODE, &val, sizeof(val));
1776}
1777
1778/**
1779 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
1780 * @port: USB4 port
1781 * @index: Retimer index
1782 * @status: Raw status code read from metadata
1783 *
1784 * This can be called after usb4_port_retimer_nvm_authenticate() and
1785 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
1786 *
1787 * Returns %0 if the authentication status was successfully read. The
1788 * completion metadata (the result) is then stored into @status. If
1789 * reading the status fails, returns negative errno.
1790 */
1791int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
1792					      u32 *status)
1793{
1794	u32 metadata, val;
1795	int ret;
1796
1797	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
1798				     sizeof(val));
1799	if (ret)
1800		return ret;
1801
1802	switch (val) {
1803	case 0:
1804		*status = 0;
1805		return 0;
1806
1807	case USB4_SB_OPCODE_ERR:
1808		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
1809					     &metadata, sizeof(metadata));
1810		if (ret)
1811			return ret;
1812
1813		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
1814		return 0;
1815
1816	case USB4_SB_OPCODE_ONS:
1817		return -EOPNOTSUPP;
1818
1819	default:
1820		return -EIO;
1821	}
1822}
1823
1824static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
1825					    void *buf, size_t dwords)
1826{
1827	const struct retimer_info *info = data;
1828	struct tb_port *port = info->port;
1829	u8 index = info->index;
1830	u32 metadata;
1831	int ret;
1832
1833	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
1834	if (dwords < NVM_DATA_DWORDS)
1835		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
1836
1837	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
1838				      sizeof(metadata));
1839	if (ret)
1840		return ret;
1841
1842	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
1843	if (ret)
1844		return ret;
1845
1846	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
1847				      dwords * 4);
1848}
1849
1850/**
1851 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
1852 * @port: USB4 port
1853 * @index: Retimer index
1854 * @address: NVM address (in bytes) to start reading
1855 * @buf: Data read from NVM is stored here
1856 * @size: Number of bytes to read
1857 *
1858 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
1859 * read was successful and negative errno in case of failure.
1860 * Specifically returns %-ENODEV if there is no retimer at @index.
1861 */
1862int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
1863			       unsigned int address, void *buf, size_t size)
1864{
1865	struct retimer_info info = { .port = port, .index = index };
1866
1867	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
1868				usb4_port_retimer_nvm_read_block, &info);
1869}
1870
1871/**
1872 * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
1873 * @port: USB3 adapter port
1874 *
1875 * Return maximum supported link rate of a USB3 adapter in Mb/s.
1876 * Negative errno in case of error.
1877 */
1878int usb4_usb3_port_max_link_rate(struct tb_port *port)
1879{
1880	int ret, lr;
1881	u32 val;
1882
1883	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1884		return -EINVAL;
1885
1886	ret = tb_port_read(port, &val, TB_CFG_PORT,
1887			   port->cap_adap + ADP_USB3_CS_4, 1);
1888	if (ret)
1889		return ret;
1890
1891	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
1892	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
1893}
1894
1895/**
1896 * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
1897 * @port: USB3 adapter port
1898 *
1899 * Return actual established link rate of a USB3 adapter in Mb/s. If the
1900 * link is not up returns %0 and negative errno in case of failure.
1901 */
1902int usb4_usb3_port_actual_link_rate(struct tb_port *port)
1903{
1904	int ret, lr;
1905	u32 val;
1906
1907	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
1908		return -EINVAL;
1909
1910	ret = tb_port_read(port, &val, TB_CFG_PORT,
1911			   port->cap_adap + ADP_USB3_CS_4, 1);
1912	if (ret)
1913		return ret;
1914
1915	if (!(val & ADP_USB3_CS_4_ULV))
1916		return 0;
1917
1918	lr = val & ADP_USB3_CS_4_ALR_MASK;
1919	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
1920}
1921
1922static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
1923{
1924	int ret;
1925	u32 val;
1926
1927	if (!tb_port_is_usb3_down(port))
1928		return -EINVAL;
1929	if (tb_route(port->sw))
1930		return -EINVAL;
1931
1932	ret = tb_port_read(port, &val, TB_CFG_PORT,
1933			   port->cap_adap + ADP_USB3_CS_2, 1);
1934	if (ret)
1935		return ret;
1936
1937	if (request)
1938		val |= ADP_USB3_CS_2_CMR;
1939	else
1940		val &= ~ADP_USB3_CS_2_CMR;
1941
1942	ret = tb_port_write(port, &val, TB_CFG_PORT,
1943			    port->cap_adap + ADP_USB3_CS_2, 1);
1944	if (ret)
1945		return ret;
1946
1947	/*
1948	 * We can use val here directly as the CMR bit is in the same place
1949	 * as HCA. Just mask out others.
1950	 */
1951	val &= ADP_USB3_CS_2_CMR;
1952	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
1953				      ADP_USB3_CS_1_HCA, val, 1500);
1954}
1955
/* Raise the CM request bit and wait for the adapter to acknowledge */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
1960
/* Drop the CM request bit and wait for the adapter to acknowledge */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
1965
1966static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
1967{
1968	unsigned long uframes;
1969
1970	uframes = bw * 512UL << scale;
1971	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
1972}
1973
1974static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
1975{
1976	unsigned long uframes;
1977
1978	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
1979	uframes = ((unsigned long)mbps * 1000 *  1000) / 8000;
1980	return DIV_ROUND_UP(uframes, 512UL << scale);
1981}
1982
1983static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
1984						   int *upstream_bw,
1985						   int *downstream_bw)
1986{
1987	u32 val, bw, scale;
1988	int ret;
1989
1990	ret = tb_port_read(port, &val, TB_CFG_PORT,
1991			   port->cap_adap + ADP_USB3_CS_2, 1);
1992	if (ret)
1993		return ret;
1994
1995	ret = tb_port_read(port, &scale, TB_CFG_PORT,
1996			   port->cap_adap + ADP_USB3_CS_3, 1);
1997	if (ret)
1998		return ret;
1999
2000	scale &= ADP_USB3_CS_3_SCALE_MASK;
2001
2002	bw = val & ADP_USB3_CS_2_AUBW_MASK;
2003	*upstream_bw = usb3_bw_to_mbps(bw, scale);
2004
2005	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
2006	*downstream_bw = usb3_bw_to_mbps(bw, scale);
2007
2008	return 0;
2009}
2010
/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	/* The registers are only valid while the CM request is held */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}
2036
2037static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
2038						  int *upstream_bw,
2039						  int *downstream_bw)
2040{
2041	u32 val, bw, scale;
2042	int ret;
2043
2044	ret = tb_port_read(port, &val, TB_CFG_PORT,
2045			   port->cap_adap + ADP_USB3_CS_1, 1);
2046	if (ret)
2047		return ret;
2048
2049	ret = tb_port_read(port, &scale, TB_CFG_PORT,
2050			   port->cap_adap + ADP_USB3_CS_3, 1);
2051	if (ret)
2052		return ret;
2053
2054	scale &= ADP_USB3_CS_3_SCALE_MASK;
2055
2056	bw = val & ADP_USB3_CS_1_CUBW_MASK;
2057	*upstream_bw = usb3_bw_to_mbps(bw, scale);
2058
2059	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
2060	*downstream_bw = usb3_bw_to_mbps(bw, scale);
2061
2062	return 0;
2063}
2064
2065static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
2066						    int upstream_bw,
2067						    int downstream_bw)
2068{
2069	u32 val, ubw, dbw, scale;
2070	int ret;
2071
2072	/* Read the used scale, hardware default is 0 */
2073	ret = tb_port_read(port, &scale, TB_CFG_PORT,
2074			   port->cap_adap + ADP_USB3_CS_3, 1);
2075	if (ret)
2076		return ret;
2077
2078	scale &= ADP_USB3_CS_3_SCALE_MASK;
2079	ubw = mbps_to_usb3_bw(upstream_bw, scale);
2080	dbw = mbps_to_usb3_bw(downstream_bw, scale);
2081
2082	ret = tb_port_read(port, &val, TB_CFG_PORT,
2083			   port->cap_adap + ADP_USB3_CS_2, 1);
2084	if (ret)
2085		return ret;
2086
2087	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
2088	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
2089	val |= ubw;
2090
2091	return tb_port_write(port, &val, TB_CFG_PORT,
2092			     port->cap_adap + ADP_USB3_CS_2, 1);
2093}
2094
/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int consumed_up, consumed_down;
	int alloc_up, alloc_down;
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto out_request;

	/* Don't allow it go lower than what is consumed */
	alloc_up = max(*upstream_bw, consumed_up);
	alloc_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, alloc_up,
						       alloc_down);
	if (ret)
		goto out_request;

	*upstream_bw = alloc_up;
	*downstream_bw = alloc_down;

	/* The CM request is dropped on success and failure alike */
out_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
2142
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int consumed_up, consumed_down;
	int ret;

	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto out_request;

	/*
	 * Always keep 1000 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	consumed_up = max(consumed_up, 1000);
	consumed_down = max(consumed_down, 1000);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto out_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

	/* The CM request is dropped on success and failure alike */
out_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}