// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests for Thunderbolt/USB4 path and tunnel handling
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}
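
/*
 * kunit_ida_init() ties each IDA to the lifetime of the test through the
 * KUnit resource API: kunit_alloc_resource() runs __ida_init() immediately
 * and registers __ida_destroy() to run when the test finishes, so the
 * tests below never need to release the per-port HopID IDAs by hand.
 */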

static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	sw->config.max_port_number = max_port_number;

	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!sw->ports)
		return NULL;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}
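
/*
 * alloc_switch() builds a purely in-memory router: it fills in only the
 * config space fields and per-port HopID IDAs that the path and tunnel
 * code inspects, so none of the tests below touch real hardware.
 */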

static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x9a1b;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 7;
	sw->ports[0].config.max_out_hop_id = 7;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[5].config.max_in_hop_id = 9;
	sw->ports[5].config.max_out_hop_id = 9;
	sw->ports[5].cap_adap = -1;

	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[6].config.max_in_hop_id = 9;
	sw->ports[6].config.max_out_hop_id = 9;
	sw->ports[6].cap_adap = -1;

	sw->ports[7].config.type = TB_TYPE_NHI;
	sw->ports[7].config.max_in_hop_id = 11;
	sw->ports[7].config.max_out_hop_id = 11;
	sw->ports[7].config.nfc_credits = 0x41800000;

	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[8].config.max_in_hop_id = 8;
	sw->ports[8].config.max_out_hop_id = 8;

	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].disabled = true;
	sw->ports[11].disabled = true;

	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}
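
/*
 * The host router built above therefore has: lane adapters 1-4 (two
 * dual-link pairs), DP IN adapters 5 and 6, the NHI at adapter 7, PCIe
 * downstream adapters 8 and 9, disabled adapters 10 and 11, and USB3
 * downstream adapters 12 and 13.
 */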

static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host(test);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 32;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 64;
	sw->max_dma_credits = 14;

	return sw;
}
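
/*
 * The limits above feed the USB4 per-protocol credit (buffer)
 * allocation logic in the tunneling code; the values look like those
 * of a real USB4 host router, but any self-consistent set would do
 * for these tests.
 */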

static struct tb_switch *alloc_host_br(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host_usb4(test);
	if (!sw)
		return NULL;

	sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[10].config.max_in_hop_id = 9;
	sw->ports[10].config.max_out_hop_id = 9;
	sw->ports[10].cap_adap = -1;
	sw->ports[10].disabled = false;

	return sw;
}

static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x15ef;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 8;
	sw->ports[0].config.max_out_hop_id = 8;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_PORT;
	sw->ports[5].config.max_in_hop_id = 19;
	sw->ports[5].config.max_out_hop_id = 19;
	sw->ports[5].total_credits = 60;
	sw->ports[5].ctl_credits = 2;
	sw->ports[5].dual_link_port = &sw->ports[6];

	sw->ports[6].config.type = TB_TYPE_PORT;
	sw->ports[6].config.max_in_hop_id = 19;
	sw->ports[6].config.max_out_hop_id = 19;
	sw->ports[6].total_credits = 60;
	sw->ports[6].ctl_credits = 2;
	sw->ports[6].dual_link_port = &sw->ports[5];
	sw->ports[6].link_nr = 1;

	sw->ports[7].config.type = TB_TYPE_PORT;
	sw->ports[7].config.max_in_hop_id = 19;
	sw->ports[7].config.max_out_hop_id = 19;
	sw->ports[7].total_credits = 60;
	sw->ports[7].ctl_credits = 2;
	sw->ports[7].dual_link_port = &sw->ports[8];

	sw->ports[8].config.type = TB_TYPE_PORT;
	sw->ports[8].config.max_in_hop_id = 19;
	sw->ports[8].config.max_out_hop_id = 19;
	sw->ports[8].total_credits = 60;
	sw->ports[8].ctl_credits = 2;
	sw->ports[8].dual_link_port = &sw->ports[7];
	sw->ports[8].link_nr = 1;

	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[10].config.max_in_hop_id = 8;
	sw->ports[10].config.max_out_hop_id = 8;

	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[11].config.max_in_hop_id = 8;
	sw->ports[11].config.max_out_hop_id = 8;

	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;
	sw->ports[13].cap_adap = -1;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;
	sw->ports[14].cap_adap = -1;

	sw->ports[15].disabled = true;

	sw->ports[16].config.type = TB_TYPE_USB3_UP;
	sw->ports[16].config.max_in_hop_id = 8;
	sw->ports[16].config.max_out_hop_id = 8;

	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[17].config.max_in_hop_id = 8;
	sw->ports[17].config.max_out_hop_id = 8;

	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[18].config.max_in_hop_id = 8;
	sw->ports[18].config.max_out_hop_id = 8;

	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[19].config.max_in_hop_id = 8;
	sw->ports[19].config.max_out_hop_id = 8;

	if (!parent)
		return sw;

	/* Link them */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;

		if (bonded) {
			/* Bonding is used */
			port->bonded = true;
			port->total_credits *= 2;
			port->dual_link_port->bonded = true;
			port->dual_link_port->total_credits = 0;
			upstream_port->bonded = true;
			upstream_port->total_credits *= 2;
			upstream_port->dual_link_port->bonded = true;
			upstream_port->dual_link_port->total_credits = 0;
		}
	}

	return sw;
}
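
/*
 * Route strings passed to alloc_dev_default() encode one downstream
 * port number per byte, lowest byte first. For example, building
 * route 0x50301 under a host works out as:
 *
 *	0x50301 & 0xff         = 1  host downstream port 1
 *	(0x50301 >> 8) & 0xff  = 3  Device #1 downstream port 3
 *	(0x50301 >> 16) & 0xff = 5  Device #2 downstream port 5
 *
 * which is why tb_route_length(0x50301) is 3 and the device ends up
 * at depth 3 in the topology diagrams below.
 */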

static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}
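
/*
 * Identical to alloc_dev_default() except that adapters 13 and 14 are
 * DP IN rather than DP OUT, so a test can originate a video path from
 * a device instead of from the host.
 */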

static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
					      struct tb_switch *parent,
					      u64 route, bool bonded)
{
	struct tb_switch *sw;
	int i;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;
	/*
	 * Device with:
	 * 2x USB4 Adapters (adapters 1,2 and 3,4),
	 * 1x PCIe Upstream (adapter 9),
	 * 1x PCIe Downstream (adapter 10),
	 * 1x USB3 Upstream (adapter 16),
	 * 1x USB3 Downstream (adapter 17)
	 */
	for (i = 5; i <= 8; i++)
		sw->ports[i].disabled = true;

	for (i = 11; i <= 14; i++)
		sw->ports[i].disabled = true;

	sw->ports[13].cap_adap = 0;
	sw->ports[14].cap_adap = 0;

	for (i = 18; i <= 19; i++)
		sw->ports[i].disabled = true;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 109;
	sw->min_dp_aux_credits = 0;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 30;
	sw->max_dma_credits = 1;

	return sw;
}

static struct tb_switch *alloc_dev_usb4(struct kunit *test,
					struct tb_switch *parent,
					u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 14;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 18;
	sw->max_pcie_credits = 32;
	sw->max_dma_credits = 14;

	return sw;
}

static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}
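
/*
 * The walk above pins down the tb_next_port_on_path() contract relied
 * on below: passing NULL as the previous port starts the walk, each
 * call returns the next port on the path, and NULL marks the end.
 * When source and destination are the same port, the walk visits that
 * one port exactly once.
 */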

static void tb_test_path_not_connected_walk(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;

	host = alloc_host(test);
	/* No connection between host and dev */
	dev = alloc_dev_default(test, NULL, 3, true);

	src_port = &host->ports[12];
	dst_port = &dev->ports[16];

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, src_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);

	/* Other direction */

	p = tb_next_port_on_path(dst_port, src_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
	 *
	 *   [Host]
	 *   1 |
	 *   1 |
	 *  [Device]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 1, true);

	src_port = &host->ports[8];
	dst_port = &dev->ports[9];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}
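
/*
 * The remaining walk tests repeat the pattern used above: one
 * expectation table is checked forwards (i running 0..n-1) and then
 * backwards (n-1..0), so a single table validates both directions of
 * tb_for_each_port_on_path().
 */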

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #2 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /
	 *      1 /
	 * [Device #2]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev2;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);

	src_port = &host->ports[5];
	dst_port = &dev2->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #3 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev3;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev1, 0x501, true);
	alloc_dev_default(test, dev1, 0x701, true);

	src_port = &host->ports[5];
	dst_port = &dev3->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #5]
	 *    5 |      | 1         \ 7
	 *    1 |  [Device #4]      \ 1
	 * [Device #3]             [Device #6]
	 *                       3 /
	 *                      1 /
	 *                    [Device #7]
	 *                  3 /      | 5
	 *                 1 /       |
	 *               [Device #8] | 1
	 *                       [Device #9]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_not_connected(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *down, *up;
	struct tb_path *path;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
	dev2 = alloc_dev_default(test, NULL, 0x303, false);

	down = &dev1->ports[10];
	up = &dev2->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
}

struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0.
	 *
	 *   [Host]
	 *   3 |: 4
	 *   1 |: 2
	 *  [Device]
	 */
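	/*
	 * In this diagram and the ones below, the numbers on either
	 * side of a link are the lane 0 and lane 1 adapters of a
	 * dual-link pair; '|' appears to mark the lane the path uses
	 * and ':' the idle lane, so "3 |: 4" means lane 0 (host
	 * adapter 3 to device adapter 1) carries the path while lane 1
	 * stays unused.
	 */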
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 9,
			.in_type = TB_TYPE_PCIE_DOWN,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 9,
			.out_type = TB_TYPE_PCIE_UP,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
	 *
	 *   [Host]
	 *   1 :| 2
	 *   1 :| 2
	 *  [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1.
	 *
	 *    [Host]
	 *    1 :| 2
	 *    1 :| 2
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 to host using lane 1.
	 *
	 *    [Host]
	 *    1 :| 2
	 *    1 :| 2
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x50701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where first and last link
	 * is bonded.
	 *
	 *    [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3050701,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 4 to host where first and last link
	 * is bonded.
	 *
	 *    [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    7 :| 8
	 *    1 :| 2
	 *  [Device #2]
	 *    5 :| 6
	 *    1 :| 2
	 *  [Device #3]
	 *    3 |
	 *    1 |
	 *  [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x3050701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_OUT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 3,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_tunnel_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create PCIe tunnel between host and two devices.
	 *
	 *   [Host]
	 *    1 |
	 *    1 |
	 *  [Device #1]
	 *    5 |
	 *    1 |
	 *  [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x501, true);

	down = &host->ports[8];
	up = &dev1->ports[9];
	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[10];
	up = &dev2->ports[9];
	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}
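
/*
 * As the assertions above show, a PCIe tunnel consists of two
 * unidirectional paths of equal length: paths[0] runs from the
 * downstream adapter to the upstream one, paths[1] runs back, and
 * each path crosses one hop per router on the way.
 */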

static void tb_test_tunnel_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel between Host and Device
	 *
	 *   [Host]
	 *   1 |
	 *   1 |
	 *  [Device]
	 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, true);

	in = &host->ports[5];
	out = &dev->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
	tb_tunnel_free(tunnel);
}
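
/*
 * A DP tunnel carries three paths. Going by the directions asserted
 * above, paths[0] and paths[1] run from DP IN to DP OUT (the video
 * and AUX TX paths) while paths[2] runs back from DP OUT to DP IN
 * (AUX RX).
 */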

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 *
	 *          [Host]
	 *           3 |
	 *           1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 to Device #12.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* Middle */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}
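
/*
 * The path length of 13 asserted above follows directly from the
 * topology: six routers up from Device #6 to Device #1, the host, and
 * six routers down to Device #12, with every router traversed
 * contributing exactly one hop.
 */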

static void tb_test_tunnel_3dp(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;

	/*
	 * Create 3 DP tunnels from Host to Devices #2, #5 and #4.
	 *
	 *          [Host]
	 *           3 |
	 *           1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host_br(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_default(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in1 = &host->ports[5];
	in2 = &host->ports[6];
	in3 = &host->ports[10];

	out1 = &dev2->ports[13];
	out2 = &dev5->ports[13];
	out3 = &dev4->ports[14];

	tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);

	tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);

	tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel3);
	KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
	KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);

	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
1666}
1667
1668static void tb_test_tunnel_usb3(struct kunit *test)
1669{
1670	struct tb_switch *host, *dev1, *dev2;
1671	struct tb_tunnel *tunnel1, *tunnel2;
1672	struct tb_port *down, *up;
1673
1674	/*
1675	 * Create USB3 tunnel between host and two devices.
1676	 *
1677	 *   [Host]
1678	 *    1 |
1679	 *    1 |
1680	 *  [Device #1]
1681	 *          \ 7
1682	 *           \ 1
1683	 *         [Device #2]
1684	 */
1685	host = alloc_host(test);
1686	dev1 = alloc_dev_default(test, host, 0x1, true);
1687	dev2 = alloc_dev_default(test, dev1, 0x701, true);
1688
1689	down = &host->ports[12];
1690	up = &dev1->ports[16];
1691	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1692	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1693	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
1694	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1695	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1696	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1697	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1698	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1699	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1700	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1701	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1702	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1703
1704	down = &dev1->ports[17];
1705	up = &dev2->ports[16];
1706	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1707	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1708	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
1709	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1710	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1711	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1712	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1713	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1714	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1715	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1716	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1717	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1718
1719	tb_tunnel_free(tunnel2);
1720	tb_tunnel_free(tunnel1);
1721}
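
/*
 * The endpoint checks in the two USB3 tunnels above follow a fixed
 * pattern: hop 0 of a path starts at one adapter and the last hop ends
 * at the other. A minimal sketch of a helper that could express this,
 * using only the tb_path fields already exercised in this file (the
 * helper itself is illustrative, not part of the driver):
 */
static inline void expect_path_endpoints(struct kunit *test,
					 struct tb_path *path,
					 struct tb_port *in,
					 struct tb_port *out)
{
	KUNIT_ASSERT_GT(test, path->path_length, 0);
	KUNIT_EXPECT_PTR_EQ(test, path->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, path->hops[path->path_length - 1].out_port,
			    out);
}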
1722
1723static void tb_test_tunnel_port_on_path(struct kunit *test)
1724{
1725	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1726	struct tb_port *in, *out, *port;
1727	struct tb_tunnel *dp_tunnel;
1728
1729	/*
1730	 *          [Host]
1731	 *           3 |
1732	 *           1 |
1733	 *         [Device #1]
1734	 *       3 /   | 5  \ 7
1735	 *      1 /    |     \ 1
1736	 * [Device #2] |    [Device #4]
1737	 *             | 1
1738	 *         [Device #3]
1739	 *             | 5
1740	 *             | 1
1741	 *         [Device #5]
1742	 */
1743	host = alloc_host(test);
1744	dev1 = alloc_dev_default(test, host, 0x3, true);
1745	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1746	dev3 = alloc_dev_default(test, dev1, 0x503, true);
1747	dev4 = alloc_dev_default(test, dev1, 0x703, true);
1748	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1749
1750	in = &dev2->ports[13];
1751	out = &dev5->ports[13];
1752
1753	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1754	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
1755
1756	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1757	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
1758
1759	port = &host->ports[8];
1760	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1761
1762	port = &host->ports[3];
1763	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1764
1765	port = &dev1->ports[1];
1766	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1767
1768	port = &dev1->ports[3];
1769	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1770
1771	port = &dev1->ports[5];
1772	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1773
1774	port = &dev1->ports[7];
1775	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1776
1777	port = &dev3->ports[1];
1778	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1779
1780	port = &dev5->ports[1];
1781	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1782
1783	port = &dev4->ports[1];
1784	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1785
1786	tb_tunnel_free(dp_tunnel);
1787}
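
/*
 * tb_tunnel_port_on_path() reports whether a given adapter is part of
 * any of the tunnel's paths. As a usage sketch (illustrative only, not
 * in the original tests), the per-port checks above could be condensed
 * by counting how many adapters of a switch lie on the path:
 */
static inline int count_ports_on_path(const struct tb_tunnel *tunnel,
				      const struct tb_switch *sw)
{
	int i, count = 0;

	for (i = 0; i <= sw->config.max_port_number; i++)
		if (tb_tunnel_port_on_path(tunnel, &sw->ports[i]))
			count++;
	return count;
}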
1788
1789static void tb_test_tunnel_dma(struct kunit *test)
1790{
1791	struct tb_port *nhi, *port;
1792	struct tb_tunnel *tunnel;
1793	struct tb_switch *host;
1794
1795	/*
1796	 * Create DMA tunnel from NHI to port 1 and back.
1797	 *
1798	 *   [Host 1]
1799	 *    1 ^ In HopID 1 -> Out HopID 8
1800	 *      |
1801	 *      v In HopID 8 -> Out HopID 1
1802	 * ............ Domain border
1803	 *      |
1804	 *   [Host 2]
1805	 */
1806	host = alloc_host(test);
1807	nhi = &host->ports[7];
1808	port = &host->ports[1];
1809
1810	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1811	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1812	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1813	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1814	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1815	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1816	/* RX path */
1817	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1818	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1819	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1820	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1821	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
1822	/* TX path */
1823	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
1824	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1825	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1826	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
1827	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
1828
1829	tb_tunnel_free(tunnel);
1830}
1831
1832static void tb_test_tunnel_dma_rx(struct kunit *test)
1833{
1834	struct tb_port *nhi, *port;
1835	struct tb_tunnel *tunnel;
1836	struct tb_switch *host;
1837
1838	/*
1839	 * Create DMA RX tunnel from port 1 to NHI.
1840	 *
1841	 *   [Host 1]
1842	 *    1 ^
1843	 *      |
1844	 *      | In HopID 15 -> Out HopID 2
1845	 * ............ Domain border
1846	 *      |
1847	 *   [Host 2]
1848	 */
1849	host = alloc_host(test);
1850	nhi = &host->ports[7];
1851	port = &host->ports[1];
1852
1853	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
1854	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1855	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1856	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1857	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1858	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1859	/* RX path */
1860	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1861	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1862	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
1863	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1864	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
1865
1866	tb_tunnel_free(tunnel);
1867}
1868
1869static void tb_test_tunnel_dma_tx(struct kunit *test)
1870{
1871	struct tb_port *nhi, *port;
1872	struct tb_tunnel *tunnel;
1873	struct tb_switch *host;
1874
1875	/*
1876	 * Create DMA TX tunnel from NHI to port 1.
1877	 *
1878	 *   [Host 1]
1879	 *    1 | In HopID 2 -> Out HopID 15
1880	 *      |
1881	 *      v
1882	 * ............ Domain border
1883	 *      |
1884	 *   [Host 2]
1885	 */
1886	host = alloc_host(test);
1887	nhi = &host->ports[7];
1888	port = &host->ports[1];
1889
1890	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
1891	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1892	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1893	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1894	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1895	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1896	/* TX path */
1897	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1898	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
1899	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
1900	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
1901	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
1902
1903	tb_tunnel_free(tunnel);
1904}
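
/*
 * Taken together, the three DMA tests above exercise the argument
 * convention of tb_tunnel_alloc_dma(): the two trailing pairs describe
 * the transmit and receive HopID/ring numbers, and passing -1 for a
 * pair omits that direction entirely, which is why the RX-only and
 * TX-only tunnels end up with npaths == 1 instead of 2.
 */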
1905
1906static void tb_test_tunnel_dma_chain(struct kunit *test)
1907{
1908	struct tb_switch *host, *dev1, *dev2;
1909	struct tb_port *nhi, *port;
1910	struct tb_tunnel *tunnel;
1911
1912	/*
1913	 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1914	 *
1915	 *   [Host 1]
1916	 *    1 ^ In HopID 1 -> Out HopID x
1917	 *      |
1918	 *    1 | In HopID x -> Out HopID 1
1919	 *  [Device #1]
1920	 *         7 \
1921	 *          1 \
1922	 *         [Device #2]
1923	 *           3 | In HopID x -> Out HopID 8
1924	 *             |
1925	 *             v In HopID 8 -> Out HopID x
1926	 * ............ Domain border
1927	 *             |
1928	 *          [Host 2]
1929	 */
1930	host = alloc_host(test);
1931	dev1 = alloc_dev_default(test, host, 0x1, true);
1932	dev2 = alloc_dev_default(test, dev1, 0x701, true);
1933
1934	nhi = &host->ports[7];
1935	port = &dev2->ports[3];
1936	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1937	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1938	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1939	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1940	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1941	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1942	/* RX path */
1943	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1944	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1945	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1946	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1947			    &dev2->ports[1]);
1948	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1949			    &dev1->ports[7]);
1950	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1951			    &dev1->ports[1]);
1952	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1953			    &host->ports[1]);
1954	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
1955	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
1956	/* TX path */
1957	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1958	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1959	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1960	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1961			    &dev1->ports[1]);
1962	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1963			    &dev1->ports[7]);
1964	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1965			    &dev2->ports[1]);
1966	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1967	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1968
1969	tb_tunnel_free(tunnel);
1970}
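
/*
 * The "HopID x" entries in the diagram above stand for intermediate
 * HopIDs that are chosen at run time when the paths are allocated,
 * which is why the assertions only pin down the HopIDs at the two ends
 * of the tunnel (HopID 1 on the NHI and HopID 8 at the domain border).
 */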
1971
1972static void tb_test_tunnel_dma_match(struct kunit *test)
1973{
1974	struct tb_port *nhi, *port;
1975	struct tb_tunnel *tunnel;
1976	struct tb_switch *host;
1977
1978	host = alloc_host(test);
1979	nhi = &host->ports[7];
1980	port = &host->ports[1];
1981
1982	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
1983	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1984
1985	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1986	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
1987	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1988	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1989	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1990	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1991	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1992	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
1993	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1994	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
1995
1996	tb_tunnel_free(tunnel);
1997
1998	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
1999	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2000	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
2001	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
2002	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
2003	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
2004	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
2005	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
2006	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
2007
2008	tb_tunnel_free(tunnel);
2009
2010	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
2011	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2012	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
2013	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
2014	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
2015	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
2016	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
2017	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
2018	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
2019
2020	tb_tunnel_free(tunnel);
2021}
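
/*
 * A hypothetical one-line model of the matching rule the expectations
 * above encode: in tb_tunnel_match_dma() a -1 argument acts as a
 * wildcard, and every non-wildcard argument must equal the value the
 * tunnel was created with (where -1 at creation time means that
 * direction does not exist at all). Not driver code, just the rule:
 */
static inline bool dma_arg_matches(int requested, int actual)
{
	return requested < 0 || requested == actual;
}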
2022
2023static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
2024{
2025	struct tb_switch *host, *dev;
2026	struct tb_port *up, *down;
2027	struct tb_tunnel *tunnel;
2028	struct tb_path *path;
2029
2030	host = alloc_host(test);
2031	dev = alloc_dev_default(test, host, 0x1, false);
2032
2033	down = &host->ports[8];
2034	up = &dev->ports[9];
2035	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2036	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2037	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2038
2039	path = tunnel->paths[0];
2040	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2041	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2042	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2043	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2044	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
2045
2046	path = tunnel->paths[1];
2047	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2048	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2049	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2050	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2051	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
2052
2053	tb_tunnel_free(tunnel);
2054}
2055
2056static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
2057{
2058	struct tb_switch *host, *dev;
2059	struct tb_port *up, *down;
2060	struct tb_tunnel *tunnel;
2061	struct tb_path *path;
2062
2063	host = alloc_host(test);
2064	dev = alloc_dev_default(test, host, 0x1, true);
2065
2066	down = &host->ports[8];
2067	up = &dev->ports[9];
2068	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2069	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2070	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2071
2072	path = tunnel->paths[0];
2073	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2074	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2075	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2076	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2077	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2078
2079	path = tunnel->paths[1];
2080	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2081	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2082	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2083	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2084	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2085
2086	tb_tunnel_free(tunnel);
2087}
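
/*
 * Compared with the not-bonded case above, bonding the lanes doubles
 * the link's total credits, so the device-side hop now gets 32 initial
 * credits instead of 16 while the first hop stays at 7.
 */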
2088
2089static void tb_test_credit_alloc_pcie(struct kunit *test)
2090{
2091	struct tb_switch *host, *dev;
2092	struct tb_port *up, *down;
2093	struct tb_tunnel *tunnel;
2094	struct tb_path *path;
2095
2096	host = alloc_host_usb4(test);
2097	dev = alloc_dev_usb4(test, host, 0x1, true);
2098
2099	down = &host->ports[8];
2100	up = &dev->ports[9];
2101	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2102	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2103	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2104
2105	path = tunnel->paths[0];
2106	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2107	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2108	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2109	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2110	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2111
2112	path = tunnel->paths[1];
2113	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2114	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2115	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2116	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2117	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2118
2119	tb_tunnel_free(tunnel);
2120}
2121
2122static void tb_test_credit_alloc_without_dp(struct kunit *test)
2123{
2124	struct tb_switch *host, *dev;
2125	struct tb_port *up, *down;
2126	struct tb_tunnel *tunnel;
2127	struct tb_path *path;
2128
2129	host = alloc_host_usb4(test);
2130	dev = alloc_dev_without_dp(test, host, 0x1, true);
2131
2132	/*
2133	 * The device has no DP, therefore baMinDPmain = baMinDPaux = 0.
2134	 *
2135	 * Create a PCIe path with fewer buffers than baMaxPCIe.
2136	 *
2137	 * For a device with the following buffer configuration:
2138	 * baMaxUSB3 = 109
2139	 * baMinDPaux = 0
2140	 * baMinDPmain = 0
2141	 * baMaxPCIe = 30
2142	 * baMaxHI = 1
2143	 * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
2144	 * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
2145	 *		= Max(6, Min(30, 118 - 109)) = Max(6, Min(30, 9)) = 9
2146	 */
2147	down = &host->ports[8];
2148	up = &dev->ports[9];
2149	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2150	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2151	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2152
2153	/* PCIe downstream path */
2154	path = tunnel->paths[0];
2155	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2156	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2157	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2158	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2159	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
2160
2161	/* PCIe upstream path */
2162	path = tunnel->paths[1];
2163	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2164	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2165	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2166	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2167	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2168
2169	tb_tunnel_free(tunnel);
2170}
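
/*
 * A plain-C sketch of the credit arithmetic from the comment above.
 * The helper name and parameters are illustrative, not driver API; it
 * just reproduces Max(6, Min(baMaxPCIe, Remaining - baMaxUSB3)):
 */
static inline unsigned int example_pcie_credits(unsigned int total_buffers,
						unsigned int cp,
						unsigned int dp,
						unsigned int ba_max_pcie,
						unsigned int ba_max_usb3)
{
	unsigned int remaining = total_buffers - (cp + dp);
	unsigned int credits = remaining - ba_max_usb3;

	if (credits > ba_max_pcie)
		credits = ba_max_pcie;
	return credits < 6 ? 6 : credits;
}

/* example_pcie_credits(120, 2, 0, 30, 109) == 9, matching the 9U above. */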
2171
2172static void tb_test_credit_alloc_dp(struct kunit *test)
2173{
2174	struct tb_switch *host, *dev;
2175	struct tb_port *in, *out;
2176	struct tb_tunnel *tunnel;
2177	struct tb_path *path;
2178
2179	host = alloc_host_usb4(test);
2180	dev = alloc_dev_usb4(test, host, 0x1, true);
2181
2182	in = &host->ports[5];
2183	out = &dev->ports[14];
2184
2185	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2186	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2187	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
2188
2189	/* Video (main) path */
2190	path = tunnel->paths[0];
2191	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2192	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2193	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2194	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2195	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2196
2197	/* AUX TX */
2198	path = tunnel->paths[1];
2199	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2200	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2201	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2202	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2203	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2204
2205	/* AUX RX */
2206	path = tunnel->paths[2];
2207	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2208	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2209	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2210	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2211	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2212
2213	tb_tunnel_free(tunnel);
2214}
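
/*
 * The DP expectations above encode a split that the credit tests rely
 * on: the video (main) path reserves bandwidth through nfc_credits
 * while leaving initial_credits at 0, whereas the two AUX paths are
 * buffered with one initial credit per hop and no nfc_credits.
 */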
2215
2216static void tb_test_credit_alloc_usb3(struct kunit *test)
2217{
2218	struct tb_switch *host, *dev;
2219	struct tb_port *up, *down;
2220	struct tb_tunnel *tunnel;
2221	struct tb_path *path;
2222
2223	host = alloc_host_usb4(test);
2224	dev = alloc_dev_usb4(test, host, 0x1, true);
2225
2226	down = &host->ports[12];
2227	up = &dev->ports[16];
2228	tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2229	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2230	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2231
2232	path = tunnel->paths[0];
2233	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2234	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2235	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2236	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2237	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2238
2239	path = tunnel->paths[1];
2240	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2241	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2242	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2243	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2244	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2245
2246	tb_tunnel_free(tunnel);
2247}
2248
2249static void tb_test_credit_alloc_dma(struct kunit *test)
2250{
2251	struct tb_switch *host, *dev;
2252	struct tb_port *nhi, *port;
2253	struct tb_tunnel *tunnel;
2254	struct tb_path *path;
2255
2256	host = alloc_host_usb4(test);
2257	dev = alloc_dev_usb4(test, host, 0x1, true);
2258
2259	nhi = &host->ports[7];
2260	port = &dev->ports[3];
2261
2262	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2263	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2264	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2265
2266	/* DMA RX */
2267	path = tunnel->paths[0];
2268	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2269	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2270	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2271	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2272	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2273
2274	/* DMA TX */
2275	path = tunnel->paths[1];
2276	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2277	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2278	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2279	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2280	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2281
2282	tb_tunnel_free(tunnel);
2283}
2284
2285static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
2286{
2287	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
2288	struct tb_switch *host, *dev;
2289	struct tb_port *nhi, *port;
2290	struct tb_path *path;
2291
2292	host = alloc_host_usb4(test);
2293	dev = alloc_dev_usb4(test, host, 0x1, true);
2294
2295	nhi = &host->ports[7];
2296	port = &dev->ports[3];
2297
2298	/*
2299	 * Create three DMA tunnels through the same ports. With the
2300	 * default buffers we should be able to create two; the third
2301	 * one fails.
2302	 *
2303	 * For the default host we have the following buffers for DMA:
2304	 *
2305	 *   120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
2306	 *
2307	 * For the device we have the following:
2308	 *
2309	 *  120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
2310	 *
2311	 * spare = 14 + 1 = 15
2312	 *
2313	 * So on the host the first tunnel gets 14 credits and the second
2314	 * gets the remaining 1, after which we run out of buffers.
2315	 */
2316	tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2317	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
2318	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
2319
2320	path = tunnel1->paths[0];
2321	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2322	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2323	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2324	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2325	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2326
2327	path = tunnel1->paths[1];
2328	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2329	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2330	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2331	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2332	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2333
2334	tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2335	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
2336	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
2337
2338	path = tunnel2->paths[0];
2339	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2340	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2341	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2342	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2343	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2344
2345	path = tunnel2->paths[1];
2346	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2347	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2348	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2349	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2350	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2351
2352	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2353	KUNIT_ASSERT_NULL(test, tunnel3);
2354
2355	/*
2356	 * Release the first DMA tunnel. That should make 14 buffers
2357	 * available for the next tunnel.
2358	 */
2359	tb_tunnel_free(tunnel1);
2360
2361	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2362	KUNIT_ASSERT_NOT_NULL(test, tunnel3);
2363
2364	path = tunnel3->paths[0];
2365	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2366	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2367	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2368	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2369	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2370
2371	path = tunnel3->paths[1];
2372	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2373	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2374	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2375	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2376	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2377
2378	tb_tunnel_free(tunnel3);
2379	tb_tunnel_free(tunnel2);
2380}
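
/*
 * The sequence above behaves like a simple depleting budget on the
 * host side: 15 DMA buffers are available in total (see the comment
 * above), each tunnel asks for up to 14, and creation fails once the
 * budget is exhausted until a tunnel is freed. A hypothetical model of
 * that observed behaviour, not driver code:
 */
static int example_dma_buffers = 15;	/* assumed host-side DMA budget */

static inline int example_claim_dma_buffers(int want)
{
	int got = want < example_dma_buffers ? want : example_dma_buffers;

	if (!got)
		return -1;	/* mirrors tunnel3 failing above */
	example_dma_buffers -= got;
	return got;
}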
2381
2382static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
2383			struct tb_switch *host, struct tb_switch *dev)
2384{
2385	struct tb_port *up, *down;
2386	struct tb_tunnel *pcie_tunnel;
2387	struct tb_path *path;
2388
2389	down = &host->ports[8];
2390	up = &dev->ports[9];
2391	pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2392	KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
2393	KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
2394
2395	path = pcie_tunnel->paths[0];
2396	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2397	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2398	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2399	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2400	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2401
2402	path = pcie_tunnel->paths[1];
2403	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2404	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2405	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2406	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2407	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2408
2409	return pcie_tunnel;
2410}
2411
2412static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
2413			struct tb_switch *host, struct tb_switch *dev)
2414{
2415	struct tb_port *in, *out;
2416	struct tb_tunnel *dp_tunnel1;
2417	struct tb_path *path;
2418
2419	in = &host->ports[5];
2420	out = &dev->ports[13];
2421	dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2422	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
2423	KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
2424
2425	path = dp_tunnel1->paths[0];
2426	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2427	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2428	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2429	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2430	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2431
2432	path = dp_tunnel1->paths[1];
2433	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2434	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2435	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2436	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2437	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2438
2439	path = dp_tunnel1->paths[2];
2440	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2441	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2442	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2443	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2444	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2445
2446	return dp_tunnel1;
2447}
2448
2449static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
2450			struct tb_switch *host, struct tb_switch *dev)
2451{
2452	struct tb_port *in, *out;
2453	struct tb_tunnel *dp_tunnel2;
2454	struct tb_path *path;
2455
2456	in = &host->ports[6];
2457	out = &dev->ports[14];
2458	dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2459	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
2460	KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
2461
2462	path = dp_tunnel2->paths[0];
2463	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2464	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2465	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2466	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2467	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2468
2469	path = dp_tunnel2->paths[1];
2470	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2471	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2472	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2473	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2474	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2475
2476	path = dp_tunnel2->paths[2];
2477	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2478	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2479	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2480	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2481	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2482
2483	return dp_tunnel2;
2484}
2485
2486static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
2487			struct tb_switch *host, struct tb_switch *dev)
2488{
2489	struct tb_port *up, *down;
2490	struct tb_tunnel *usb3_tunnel;
2491	struct tb_path *path;
2492
2493	down = &host->ports[12];
2494	up = &dev->ports[16];
2495	usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2496	KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
2497	KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
2498
2499	path = usb3_tunnel->paths[0];
2500	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2501	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2502	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2503	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2504	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2505
2506	path = usb3_tunnel->paths[1];
2507	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2508	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2509	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2510	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2511	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2512
2513	return usb3_tunnel;
2514}
2515
2516static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
2517			struct tb_switch *host, struct tb_switch *dev)
2518{
2519	struct tb_port *nhi, *port;
2520	struct tb_tunnel *dma_tunnel1;
2521	struct tb_path *path;
2522
2523	nhi = &host->ports[7];
2524	port = &dev->ports[3];
2525	dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2526	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
2527	KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
2528
2529	path = dma_tunnel1->paths[0];
2530	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2531	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2532	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2533	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2534	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2535
2536	path = dma_tunnel1->paths[1];
2537	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2538	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2539	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2540	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2541	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2542
2543	return dma_tunnel1;
2544}
2545
2546static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
2547			struct tb_switch *host, struct tb_switch *dev)
2548{
2549	struct tb_port *nhi, *port;
2550	struct tb_tunnel *dma_tunnel2;
2551	struct tb_path *path;
2552
2553	nhi = &host->ports[7];
2554	port = &dev->ports[3];
2555	dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2556	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
2557	KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
2558
2559	path = dma_tunnel2->paths[0];
2560	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2561	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2562	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2563	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2564	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2565
2566	path = dma_tunnel2->paths[1];
2567	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2568	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2569	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2570	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2571	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2572
2573	return dma_tunnel2;
2574}
2575
2576static void tb_test_credit_alloc_all(struct kunit *test)
2577{
2578	struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
2579	struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
2580	struct tb_switch *host, *dev;
2581
2582	/*
2583	 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
2584	 * device. The expectation is that all of these can be established
2585	 * with the default credit allocation found in Intel hardware.
2586	 */
2587
2588	host = alloc_host_usb4(test);
2589	dev = alloc_dev_usb4(test, host, 0x1, true);
2590
2591	pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
2592	dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
2593	dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
2594	usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
2595	dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
2596	dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
2597
2598	tb_tunnel_free(dma_tunnel2);
2599	tb_tunnel_free(dma_tunnel1);
2600	tb_tunnel_free(usb3_tunnel);
2601	tb_tunnel_free(dp_tunnel2);
2602	tb_tunnel_free(dp_tunnel1);
2603	tb_tunnel_free(pcie_tunnel);
2604}
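
/*
 * The uppercase TB_TEST_*_TUNNEL() helpers above repeat the same
 * per-tunnel credit expectations as the individual tests, which lets
 * tb_test_credit_alloc_all() verify that the allocations still hold
 * when all six tunnels coexist on the same link.
 */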
2605
2606static const u32 root_directory[] = {
2607	0x55584401,	/* "UXD" v1 */
2608	0x00000018,	/* Root directory length */
2609	0x76656e64,	/* "vend" */
2610	0x6f726964,	/* "orid" */
2611	0x76000001,	/* "v" R 1 */
2612	0x00000a27,	/* Immediate value, ! Vendor ID */
2613	0x76656e64,	/* "vend" */
2614	0x6f726964,	/* "orid" */
2615	0x74000003,	/* "t" R 3 */
2616	0x0000001a,	/* Text leaf offset, ("Apple Inc.") */
2617	0x64657669,	/* "devi" */
2618	0x63656964,	/* "ceid" */
2619	0x76000001,	/* "v" R 1 */
2620	0x0000000a,	/* Immediate value, ! Device ID */
2621	0x64657669,	/* "devi" */
2622	0x63656964,	/* "ceid" */
2623	0x74000003,	/* "t" R 3 */
2624	0x0000001d,	/* Text leaf offset, ("Macintosh") */
2625	0x64657669,	/* "devi" */
2626	0x63657276,	/* "cerv" */
2627	0x76000001,	/* "v" R 1 */
2628	0x80000100,	/* Immediate value, Device Revision */
2629	0x6e657477,	/* "netw" */
2630	0x6f726b00,	/* "ork" */
2631	0x44000014,	/* "D" R 20 */
2632	0x00000021,	/* Directory data offset, (Network Directory) */
2633	0x4170706c,	/* "Appl" */
2634	0x6520496e,	/* "e In" */
2635	0x632e0000,	/* "c." ! */
2636	0x4d616369,	/* "Maci" */
2637	0x6e746f73,	/* "ntos" */
2638	0x68000000,	/* "h" */
2639	0x00000000,	/* padding */
2640	0xca8961c6,	/* Directory UUID, Network Directory */
2641	0x9541ce1c,	/* Directory UUID, Network Directory */
2642	0x5949b8bd,	/* Directory UUID, Network Directory */
2643	0x4f5a5f2e,	/* Directory UUID, Network Directory */
2644	0x70727463,	/* "prtc" */
2645	0x69640000,	/* "id" */
2646	0x76000001,	/* "v" R 1 */
2647	0x00000001,	/* Immediate value, Network Protocol ID */
2648	0x70727463,	/* "prtc" */
2649	0x76657273,	/* "vers" */
2650	0x76000001,	/* "v" R 1 */
2651	0x00000001,	/* Immediate value, Network Protocol Version */
2652	0x70727463,	/* "prtc" */
2653	0x72657673,	/* "revs" */
2654	0x76000001,	/* "v" R 1 */
2655	0x00000001,	/* Immediate value, Network Protocol Revision */
2656	0x70727463,	/* "prtc" */
2657	0x73746e73,	/* "stns" */
2658	0x76000001,	/* "v" R 1 */
2659	0x00000000,	/* Immediate value, Network Protocol Settings */
2660};
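
/*
 * Sketch of how the per-property header dwords in the block above
 * decode, as inferred from the annotations (field layout shown for
 * illustration only): the high byte carries the type character and
 * the low bits the length, so 0x76000001 is 'v' (immediate value,
 * length 1), 0x74000003 is 't' (text, 3 dwords) and 0x44000014 is
 * 'D' (directory, 20 dwords).
 */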
2661
2662static const uuid_t network_dir_uuid =
2663	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
2664		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
2665
2666static void tb_test_property_parse(struct kunit *test)
2667{
2668	struct tb_property_dir *dir, *network_dir;
2669	struct tb_property *p;
2670
2671	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2672	KUNIT_ASSERT_NOT_NULL(test, dir);
2673
2674	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
2675	KUNIT_ASSERT_NULL(test, p);
2676
2677	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
2678	KUNIT_ASSERT_NOT_NULL(test, p);
2679	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
2680
2681	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
2682	KUNIT_ASSERT_NOT_NULL(test, p);
2683	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
2684
2685	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2686	KUNIT_ASSERT_NOT_NULL(test, p);
2687	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
2688
2689	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2690	KUNIT_ASSERT_NOT_NULL(test, p);
2691	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
2692
2693	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
2694	KUNIT_ASSERT_NULL(test, p);
2695
2696	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
2697	KUNIT_ASSERT_NOT_NULL(test, p);
2698
2699	network_dir = p->value.dir;
2700	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
2701
2702	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
2703	KUNIT_ASSERT_NOT_NULL(test, p);
2704	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2705
2706	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
2707	KUNIT_ASSERT_NOT_NULL(test, p);
2708	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2709
2710	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
2711	KUNIT_ASSERT_NOT_NULL(test, p);
2712	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2713
2714	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
2715	KUNIT_ASSERT_NOT_NULL(test, p);
2716	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
2717
2718	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2719	KUNIT_EXPECT_TRUE(test, !p);
2720	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2721	KUNIT_EXPECT_TRUE(test, !p);
2722
2723	tb_property_free_dir(dir);
2724}
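
/*
 * Worth noting in the lookups above: tb_property_find() matches on
 * both key and type, so the same "vendorid" key resolves to different
 * properties when asked for TB_PROPERTY_TYPE_TEXT versus
 * TB_PROPERTY_TYPE_VALUE.
 */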
2725
2726static void tb_test_property_format(struct kunit *test)
2727{
2728	struct tb_property_dir *dir;
2729	ssize_t block_len;
2730	u32 *block;
2731	int ret, i;
2732
2733	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2734	KUNIT_ASSERT_NOT_NULL(test, dir);
2735
2736	ret = tb_property_format_dir(dir, NULL, 0);
2737	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2738
2739	block_len = ret;
2740
2741	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
2742	KUNIT_ASSERT_NOT_NULL(test, block);
2743
2744	ret = tb_property_format_dir(dir, block, block_len);
2745	KUNIT_EXPECT_EQ(test, ret, 0);
2746
2747	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2748		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2749
2750	tb_property_free_dir(dir);
2751}
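
/*
 * tb_test_property_format() leans on the usual two-call sizing
 * pattern: tb_property_format_dir(dir, NULL, 0) returns the required
 * number of dwords, and the second call with a buffer of that size
 * performs the actual formatting, returning 0 on success.
 */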
2752
2753static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
2754			 struct tb_property_dir *d2)
2755{
2756	struct tb_property *p1, *p2, *tmp;
2757	int n1, n2, i;
2758
2759	if (d1->uuid) {
2760		KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
2761		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
2762	} else {
2763		KUNIT_ASSERT_NULL(test, d2->uuid);
2764	}
2765
2766	n1 = 0;
2767	tb_property_for_each(d1, tmp)
2768		n1++;
2769	KUNIT_ASSERT_NE(test, n1, 0);
2770
2771	n2 = 0;
2772	tb_property_for_each(d2, tmp)
2773		n2++;
2774	KUNIT_ASSERT_NE(test, n2, 0);
2775
2776	KUNIT_ASSERT_EQ(test, n1, n2);
2777
2778	p1 = NULL;
2779	p2 = NULL;
2780	for (i = 0; i < n1; i++) {
2781		p1 = tb_property_get_next(d1, p1);
2782		KUNIT_ASSERT_NOT_NULL(test, p1);
2783		p2 = tb_property_get_next(d2, p2);
2784		KUNIT_ASSERT_NOT_NULL(test, p2);
2785
2786		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
2787		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
2788		KUNIT_ASSERT_EQ(test, p1->length, p2->length);
2789
2790		switch (p1->type) {
2791		case TB_PROPERTY_TYPE_DIRECTORY:
2792			KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
2793			KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
2794			compare_dirs(test, p1->value.dir, p2->value.dir);
2795			break;
2796
2797		case TB_PROPERTY_TYPE_DATA:
2798			KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
2799			KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
2800			KUNIT_ASSERT_TRUE(test,
2801				!memcmp(p1->value.data, p2->value.data,
2802					p1->length * 4)
2803			);
2804			break;
2805
2806		case TB_PROPERTY_TYPE_TEXT:
2807			KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
2808			KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
2809			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
2810			break;
2811
2812		case TB_PROPERTY_TYPE_VALUE:
2813			KUNIT_ASSERT_EQ(test, p1->value.immediate,
2814					p2->value.immediate);
2815			break;
2816		default:
2817			KUNIT_FAIL(test, "unexpected property type");
2818			break;
2819		}
2820	}
2821}
2822
2823static void tb_test_property_copy(struct kunit *test)
2824{
2825	struct tb_property_dir *src, *dst;
2826	u32 *block;
2827	int ret, i;
2828
2829	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2830	KUNIT_ASSERT_NOT_NULL(test, src);
2831
2832	dst = tb_property_copy_dir(src);
2833	KUNIT_ASSERT_NOT_NULL(test, dst);
2834
2835	/* Compare the structures */
2836	compare_dirs(test, src, dst);
2837
2838	/* Compare the resulting property block */
2839	ret = tb_property_format_dir(dst, NULL, 0);
2840	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2841
2842	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
2843	KUNIT_ASSERT_NOT_NULL(test, block);
2844
2845	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
2846	KUNIT_EXPECT_TRUE(test, !ret);
2847
2848	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2849		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2850
2851	tb_property_free_dir(dst);
2852	tb_property_free_dir(src);
2853}
2854
2855static struct kunit_case tb_test_cases[] = {
2856	KUNIT_CASE(tb_test_path_basic),
2857	KUNIT_CASE(tb_test_path_not_connected_walk),
2858	KUNIT_CASE(tb_test_path_single_hop_walk),
2859	KUNIT_CASE(tb_test_path_daisy_chain_walk),
2860	KUNIT_CASE(tb_test_path_simple_tree_walk),
2861	KUNIT_CASE(tb_test_path_complex_tree_walk),
2862	KUNIT_CASE(tb_test_path_max_length_walk),
2863	KUNIT_CASE(tb_test_path_not_connected),
2864	KUNIT_CASE(tb_test_path_not_bonded_lane0),
2865	KUNIT_CASE(tb_test_path_not_bonded_lane1),
2866	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
2867	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
2868	KUNIT_CASE(tb_test_path_mixed_chain),
2869	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
2870	KUNIT_CASE(tb_test_tunnel_pcie),
2871	KUNIT_CASE(tb_test_tunnel_dp),
2872	KUNIT_CASE(tb_test_tunnel_dp_chain),
2873	KUNIT_CASE(tb_test_tunnel_dp_tree),
2874	KUNIT_CASE(tb_test_tunnel_dp_max_length),
2875	KUNIT_CASE(tb_test_tunnel_3dp),
2876	KUNIT_CASE(tb_test_tunnel_port_on_path),
2877	KUNIT_CASE(tb_test_tunnel_usb3),
2878	KUNIT_CASE(tb_test_tunnel_dma),
2879	KUNIT_CASE(tb_test_tunnel_dma_rx),
2880	KUNIT_CASE(tb_test_tunnel_dma_tx),
2881	KUNIT_CASE(tb_test_tunnel_dma_chain),
2882	KUNIT_CASE(tb_test_tunnel_dma_match),
2883	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
2884	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
2885	KUNIT_CASE(tb_test_credit_alloc_pcie),
2886	KUNIT_CASE(tb_test_credit_alloc_without_dp),
2887	KUNIT_CASE(tb_test_credit_alloc_dp),
2888	KUNIT_CASE(tb_test_credit_alloc_usb3),
2889	KUNIT_CASE(tb_test_credit_alloc_dma),
2890	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
2891	KUNIT_CASE(tb_test_credit_alloc_all),
2892	KUNIT_CASE(tb_test_property_parse),
2893	KUNIT_CASE(tb_test_property_format),
2894	KUNIT_CASE(tb_test_property_copy),
2895	{ }
2896};
2897
2898static struct kunit_suite tb_test_suite = {
2899	.name = "thunderbolt",
2900	.test_cases = tb_test_cases,
2901};
2902
2903kunit_test_suite(tb_test_suite);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * KUnit tests
   4 *
   5 * Copyright (C) 2020, Intel Corporation
   6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
   7 */
   8
   9#include <kunit/test.h>
  10#include <linux/idr.h>
  11
  12#include "tb.h"
  13#include "tunnel.h"
  14
  15static int __ida_init(struct kunit_resource *res, void *context)
  16{
  17	struct ida *ida = context;
  18
  19	ida_init(ida);
  20	res->data = ida;
  21	return 0;
  22}
  23
  24static void __ida_destroy(struct kunit_resource *res)
  25{
  26	struct ida *ida = res->data;
  27
  28	ida_destroy(ida);
  29}
  30
  31static void kunit_ida_init(struct kunit *test, struct ida *ida)
  32{
  33	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
  34}
  35
  36static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
  37				      u8 upstream_port, u8 max_port_number)
  38{
  39	struct tb_switch *sw;
  40	size_t size;
  41	int i;
  42
  43	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
  44	if (!sw)
  45		return NULL;
  46
  47	sw->config.upstream_port_number = upstream_port;
  48	sw->config.depth = tb_route_length(route);
  49	sw->config.route_hi = upper_32_bits(route);
  50	sw->config.route_lo = lower_32_bits(route);
  51	sw->config.enabled = 0;
  52	sw->config.max_port_number = max_port_number;
  53
  54	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
  55	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
  56	if (!sw->ports)
  57		return NULL;
  58
  59	for (i = 0; i <= sw->config.max_port_number; i++) {
  60		sw->ports[i].sw = sw;
  61		sw->ports[i].port = i;
  62		sw->ports[i].config.port_number = i;
  63		if (i) {
  64			kunit_ida_init(test, &sw->ports[i].in_hopids);
  65			kunit_ida_init(test, &sw->ports[i].out_hopids);
  66		}
  67	}
  68
  69	return sw;
  70}
  71
  72static struct tb_switch *alloc_host(struct kunit *test)
  73{
  74	struct tb_switch *sw;
  75
  76	sw = alloc_switch(test, 0, 7, 13);
  77	if (!sw)
  78		return NULL;
  79
  80	sw->config.vendor_id = 0x8086;
  81	sw->config.device_id = 0x9a1b;
  82
  83	sw->ports[0].config.type = TB_TYPE_PORT;
  84	sw->ports[0].config.max_in_hop_id = 7;
  85	sw->ports[0].config.max_out_hop_id = 7;
  86
  87	sw->ports[1].config.type = TB_TYPE_PORT;
  88	sw->ports[1].config.max_in_hop_id = 19;
  89	sw->ports[1].config.max_out_hop_id = 19;
  90	sw->ports[1].total_credits = 60;
  91	sw->ports[1].ctl_credits = 2;
  92	sw->ports[1].dual_link_port = &sw->ports[2];
  93
  94	sw->ports[2].config.type = TB_TYPE_PORT;
  95	sw->ports[2].config.max_in_hop_id = 19;
  96	sw->ports[2].config.max_out_hop_id = 19;
  97	sw->ports[2].total_credits = 60;
  98	sw->ports[2].ctl_credits = 2;
  99	sw->ports[2].dual_link_port = &sw->ports[1];
 100	sw->ports[2].link_nr = 1;
 101
 102	sw->ports[3].config.type = TB_TYPE_PORT;
 103	sw->ports[3].config.max_in_hop_id = 19;
 104	sw->ports[3].config.max_out_hop_id = 19;
 105	sw->ports[3].total_credits = 60;
 106	sw->ports[3].ctl_credits = 2;
 107	sw->ports[3].dual_link_port = &sw->ports[4];
 108
 109	sw->ports[4].config.type = TB_TYPE_PORT;
 110	sw->ports[4].config.max_in_hop_id = 19;
 111	sw->ports[4].config.max_out_hop_id = 19;
 112	sw->ports[4].total_credits = 60;
 113	sw->ports[4].ctl_credits = 2;
 114	sw->ports[4].dual_link_port = &sw->ports[3];
 115	sw->ports[4].link_nr = 1;
 116
 117	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
 118	sw->ports[5].config.max_in_hop_id = 9;
 119	sw->ports[5].config.max_out_hop_id = 9;
 120	sw->ports[5].cap_adap = -1;
 121
 122	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
 123	sw->ports[6].config.max_in_hop_id = 9;
 124	sw->ports[6].config.max_out_hop_id = 9;
 125	sw->ports[6].cap_adap = -1;
 126
 127	sw->ports[7].config.type = TB_TYPE_NHI;
 128	sw->ports[7].config.max_in_hop_id = 11;
 129	sw->ports[7].config.max_out_hop_id = 11;
 130	sw->ports[7].config.nfc_credits = 0x41800000;
 131
 132	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
 133	sw->ports[8].config.max_in_hop_id = 8;
 134	sw->ports[8].config.max_out_hop_id = 8;
 135
 136	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
 137	sw->ports[9].config.max_in_hop_id = 8;
 138	sw->ports[9].config.max_out_hop_id = 8;
 139
 140	sw->ports[10].disabled = true;
 141	sw->ports[11].disabled = true;
 142
 143	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
 144	sw->ports[12].config.max_in_hop_id = 8;
 145	sw->ports[12].config.max_out_hop_id = 8;
 146
 147	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
 148	sw->ports[13].config.max_in_hop_id = 8;
 149	sw->ports[13].config.max_out_hop_id = 8;
 150
 151	return sw;
 152}
 153
 154static struct tb_switch *alloc_host_usb4(struct kunit *test)
 155{
 156	struct tb_switch *sw;
 157
 158	sw = alloc_host(test);
 159	if (!sw)
 160		return NULL;
 161
 162	sw->generation = 4;
 163	sw->credit_allocation = true;
 164	sw->max_usb3_credits = 32;
 165	sw->min_dp_aux_credits = 1;
 166	sw->min_dp_main_credits = 0;
 167	sw->max_pcie_credits = 64;
 168	sw->max_dma_credits = 14;
 169
 170	return sw;
 171}
 172
 173static struct tb_switch *alloc_host_br(struct kunit *test)
 174{
 175	struct tb_switch *sw;
 176
 177	sw = alloc_host_usb4(test);
 178	if (!sw)
 179		return NULL;
 180
 181	sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
 182	sw->ports[10].config.max_in_hop_id = 9;
 183	sw->ports[10].config.max_out_hop_id = 9;
 184	sw->ports[10].cap_adap = -1;
 185	sw->ports[10].disabled = false;
 186
 187	return sw;
 188}
 189
 190static struct tb_switch *alloc_dev_default(struct kunit *test,
 191					   struct tb_switch *parent,
 192					   u64 route, bool bonded)
 193{
 194	struct tb_port *port, *upstream_port;
 195	struct tb_switch *sw;
 196
 197	sw = alloc_switch(test, route, 1, 19);
 198	if (!sw)
 199		return NULL;
 200
 201	sw->config.vendor_id = 0x8086;
 202	sw->config.device_id = 0x15ef;
 203
 204	sw->ports[0].config.type = TB_TYPE_PORT;
 205	sw->ports[0].config.max_in_hop_id = 8;
 206	sw->ports[0].config.max_out_hop_id = 8;
 207
 208	sw->ports[1].config.type = TB_TYPE_PORT;
 209	sw->ports[1].config.max_in_hop_id = 19;
 210	sw->ports[1].config.max_out_hop_id = 19;
 211	sw->ports[1].total_credits = 60;
 212	sw->ports[1].ctl_credits = 2;
 213	sw->ports[1].dual_link_port = &sw->ports[2];
 214
 215	sw->ports[2].config.type = TB_TYPE_PORT;
 216	sw->ports[2].config.max_in_hop_id = 19;
 217	sw->ports[2].config.max_out_hop_id = 19;
 218	sw->ports[2].total_credits = 60;
 219	sw->ports[2].ctl_credits = 2;
 220	sw->ports[2].dual_link_port = &sw->ports[1];
 221	sw->ports[2].link_nr = 1;
 222
 223	sw->ports[3].config.type = TB_TYPE_PORT;
 224	sw->ports[3].config.max_in_hop_id = 19;
 225	sw->ports[3].config.max_out_hop_id = 19;
 226	sw->ports[3].total_credits = 60;
 227	sw->ports[3].ctl_credits = 2;
 228	sw->ports[3].dual_link_port = &sw->ports[4];
 229
 230	sw->ports[4].config.type = TB_TYPE_PORT;
 231	sw->ports[4].config.max_in_hop_id = 19;
 232	sw->ports[4].config.max_out_hop_id = 19;
 233	sw->ports[4].total_credits = 60;
 234	sw->ports[4].ctl_credits = 2;
 235	sw->ports[4].dual_link_port = &sw->ports[3];
 236	sw->ports[4].link_nr = 1;
 237
 238	sw->ports[5].config.type = TB_TYPE_PORT;
 239	sw->ports[5].config.max_in_hop_id = 19;
 240	sw->ports[5].config.max_out_hop_id = 19;
 241	sw->ports[5].total_credits = 60;
 242	sw->ports[5].ctl_credits = 2;
 243	sw->ports[5].dual_link_port = &sw->ports[6];
 244
 245	sw->ports[6].config.type = TB_TYPE_PORT;
 246	sw->ports[6].config.max_in_hop_id = 19;
 247	sw->ports[6].config.max_out_hop_id = 19;
 248	sw->ports[6].total_credits = 60;
 249	sw->ports[6].ctl_credits = 2;
 250	sw->ports[6].dual_link_port = &sw->ports[5];
 251	sw->ports[6].link_nr = 1;
 252
 253	sw->ports[7].config.type = TB_TYPE_PORT;
 254	sw->ports[7].config.max_in_hop_id = 19;
 255	sw->ports[7].config.max_out_hop_id = 19;
 256	sw->ports[7].total_credits = 60;
 257	sw->ports[7].ctl_credits = 2;
 258	sw->ports[7].dual_link_port = &sw->ports[8];
 259
 260	sw->ports[8].config.type = TB_TYPE_PORT;
 261	sw->ports[8].config.max_in_hop_id = 19;
 262	sw->ports[8].config.max_out_hop_id = 19;
 263	sw->ports[8].total_credits = 60;
 264	sw->ports[8].ctl_credits = 2;
 265	sw->ports[8].dual_link_port = &sw->ports[7];
 266	sw->ports[8].link_nr = 1;
 267
 268	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
 269	sw->ports[9].config.max_in_hop_id = 8;
 270	sw->ports[9].config.max_out_hop_id = 8;
 271
 272	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
 273	sw->ports[10].config.max_in_hop_id = 8;
 274	sw->ports[10].config.max_out_hop_id = 8;
 275
 276	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
 277	sw->ports[11].config.max_in_hop_id = 8;
 278	sw->ports[11].config.max_out_hop_id = 8;
 279
 280	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
 281	sw->ports[12].config.max_in_hop_id = 8;
 282	sw->ports[12].config.max_out_hop_id = 8;
 283
 284	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
 285	sw->ports[13].config.max_in_hop_id = 9;
 286	sw->ports[13].config.max_out_hop_id = 9;
 287	sw->ports[13].cap_adap = -1;
 288
 289	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
 290	sw->ports[14].config.max_in_hop_id = 9;
 291	sw->ports[14].config.max_out_hop_id = 9;
 292	sw->ports[14].cap_adap = -1;
 293
 294	sw->ports[15].disabled = true;
 295
 296	sw->ports[16].config.type = TB_TYPE_USB3_UP;
 297	sw->ports[16].config.max_in_hop_id = 8;
 298	sw->ports[16].config.max_out_hop_id = 8;
 299
 300	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
 301	sw->ports[17].config.max_in_hop_id = 8;
 302	sw->ports[17].config.max_out_hop_id = 8;
 303
 304	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
 305	sw->ports[18].config.max_in_hop_id = 8;
 306	sw->ports[18].config.max_out_hop_id = 8;
 307
 308	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
 309	sw->ports[19].config.max_in_hop_id = 8;
 310	sw->ports[19].config.max_out_hop_id = 8;
 311
 312	if (!parent)
 313		return sw;
 314
 315	/* Link them */
 316	upstream_port = tb_upstream_port(sw);
 317	port = tb_port_at(route, parent);
 318	port->remote = upstream_port;
 319	upstream_port->remote = port;
 320	if (port->dual_link_port && upstream_port->dual_link_port) {
 321		port->dual_link_port->remote = upstream_port->dual_link_port;
 322		upstream_port->dual_link_port->remote = port->dual_link_port;
 323
 324		if (bonded) {
 325			/* Bonding is used */
 326			port->bonded = true;
 327			port->total_credits *= 2;
 328			port->dual_link_port->bonded = true;
 329			port->dual_link_port->total_credits = 0;
 330			upstream_port->bonded = true;
 331			upstream_port->total_credits *= 2;
 332			upstream_port->dual_link_port->bonded = true;
 333			upstream_port->dual_link_port->total_credits = 0;
 334		}
 335	}
 336
 337	return sw;
 338}
 339
 340static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
 341					     struct tb_switch *parent,
 342					     u64 route, bool bonded)
 343{
 344	struct tb_switch *sw;
 345
 346	sw = alloc_dev_default(test, parent, route, bonded);
 347	if (!sw)
 348		return NULL;
 349
 350	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
 351	sw->ports[13].config.max_in_hop_id = 9;
 352	sw->ports[13].config.max_out_hop_id = 9;
 353
 354	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
 355	sw->ports[14].config.max_in_hop_id = 9;
 356	sw->ports[14].config.max_out_hop_id = 9;
 357
 358	return sw;
 359}
 360
 361static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
 362					      struct tb_switch *parent,
 363					      u64 route, bool bonded)
 364{
 365	struct tb_switch *sw;
 366	int i;
 367
 368	sw = alloc_dev_default(test, parent, route, bonded);
 369	if (!sw)
 370		return NULL;
 371	/*
 372	 * Device with:
 373	 * 2x USB4 Adapters (adapters 1,2 and 3,4),
 374	 * 1x PCIe Upstream (adapter 9),
 375	 * 1x PCIe Downstream (adapter 10),
 376	 * 1x USB3 Upstream (adapter 16),
 377	 * 1x USB3 Downstream (adapter 17)
 378	 */
 379	for (i = 5; i <= 8; i++)
 380		sw->ports[i].disabled = true;
 381
 382	for (i = 11; i <= 14; i++)
 383		sw->ports[i].disabled = true;
 384
 385	sw->ports[13].cap_adap = 0;
 386	sw->ports[14].cap_adap = 0;
 387
 388	for (i = 18; i <= 19; i++)
 389		sw->ports[i].disabled = true;
 390
 391	sw->generation = 4;
 392	sw->credit_allocation = true;
 393	sw->max_usb3_credits = 109;
 394	sw->min_dp_aux_credits = 0;
 395	sw->min_dp_main_credits = 0;
 396	sw->max_pcie_credits = 30;
 397	sw->max_dma_credits = 1;
 398
 399	return sw;
 400}
 401
 402static struct tb_switch *alloc_dev_usb4(struct kunit *test,
 403					struct tb_switch *parent,
 404					u64 route, bool bonded)
 405{
 406	struct tb_switch *sw;
 407
 408	sw = alloc_dev_default(test, parent, route, bonded);
 409	if (!sw)
 410		return NULL;
 411
 412	sw->generation = 4;
 413	sw->credit_allocation = true;
 414	sw->max_usb3_credits = 14;
 415	sw->min_dp_aux_credits = 1;
 416	sw->min_dp_main_credits = 18;
 417	sw->max_pcie_credits = 32;
 418	sw->max_dma_credits = 14;
 419
 420	return sw;
 421}
 422
 423static void tb_test_path_basic(struct kunit *test)
 424{
 425	struct tb_port *src_port, *dst_port, *p;
 426	struct tb_switch *host;
 427
 428	host = alloc_host(test);
 429
 430	src_port = &host->ports[5];
 431	dst_port = src_port;
 432
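	/* A path from an adapter to itself consists of that adapter alone */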
 433	p = tb_next_port_on_path(src_port, dst_port, NULL);
 434	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
 435
 436	p = tb_next_port_on_path(src_port, dst_port, p);
 437	KUNIT_EXPECT_TRUE(test, !p);
 438}
 439
 440static void tb_test_path_not_connected_walk(struct kunit *test)
 441{
 442	struct tb_port *src_port, *dst_port, *p;
 443	struct tb_switch *host, *dev;
 444
 445	host = alloc_host(test);
 446	/* No connection between host and dev */
	dev = alloc_dev_default(test, NULL, 0x3, true);
 448
 449	src_port = &host->ports[12];
 450	dst_port = &dev->ports[16];
 451
 452	p = tb_next_port_on_path(src_port, dst_port, NULL);
 453	KUNIT_EXPECT_PTR_EQ(test, p, src_port);
 454
 455	p = tb_next_port_on_path(src_port, dst_port, p);
 456	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
 457
 458	p = tb_next_port_on_path(src_port, dst_port, p);
 459	KUNIT_EXPECT_TRUE(test, !p);
 460
 461	/* Other direction */
 462
 463	p = tb_next_port_on_path(dst_port, src_port, NULL);
 464	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
 465
 466	p = tb_next_port_on_path(dst_port, src_port, p);
 467	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
 468
 469	p = tb_next_port_on_path(dst_port, src_port, p);
 470	KUNIT_EXPECT_TRUE(test, !p);
 471}
 472
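/* One expected route/adapter/type step of a path walk */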
 473struct port_expectation {
 474	u64 route;
 475	u8 port;
 476	enum tb_port_type type;
 477};
 478
 479static void tb_test_path_single_hop_walk(struct kunit *test)
 480{
 481	/*
 482	 * Walks from Host PCIe downstream port to Device #1 PCIe
 483	 * upstream port.
 484	 *
 485	 *   [Host]
 486	 *   1 |
 487	 *   1 |
 488	 *  [Device]
 489	 */
 490	static const struct port_expectation test_data[] = {
 491		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
 492		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
 493		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
 494		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
 495	};
 496	struct tb_port *src_port, *dst_port, *p;
 497	struct tb_switch *host, *dev;
 498	int i;
 499
 500	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, true);
 502
 503	src_port = &host->ports[8];
 504	dst_port = &dev->ports[9];
 505
 506	/* Walk both directions */
 507
 508	i = 0;
 509	tb_for_each_port_on_path(src_port, dst_port, p) {
 510		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 511		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 512		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 513		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 514				test_data[i].type);
 515		i++;
 516	}
 517
 518	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
 519
 520	i = ARRAY_SIZE(test_data) - 1;
 521	tb_for_each_port_on_path(dst_port, src_port, p) {
 522		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 523		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 524		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 525		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 526				test_data[i].type);
 527		i--;
 528	}
 529
 530	KUNIT_EXPECT_EQ(test, i, -1);
 531}
 532
 533static void tb_test_path_daisy_chain_walk(struct kunit *test)
 534{
 535	/*
 536	 * Walks from Host DP IN to Device #2 DP OUT.
 537	 *
 538	 *           [Host]
 539	 *            1 |
 540	 *            1 |
 541	 *         [Device #1]
 542	 *       3 /
 543	 *      1 /
 544	 * [Device #2]
 545	 */
 546	static const struct port_expectation test_data[] = {
 547		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
 548		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
 549		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
 550		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
 551		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
 552		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
 553	};
 554	struct tb_port *src_port, *dst_port, *p;
 555	struct tb_switch *host, *dev1, *dev2;
 556	int i;
 557
 558	host = alloc_host(test);
 559	dev1 = alloc_dev_default(test, host, 0x1, true);
 560	dev2 = alloc_dev_default(test, dev1, 0x301, true);
 561
 562	src_port = &host->ports[5];
 563	dst_port = &dev2->ports[13];
 564
 565	/* Walk both directions */
 566
 567	i = 0;
 568	tb_for_each_port_on_path(src_port, dst_port, p) {
 569		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 570		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 571		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 572		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 573				test_data[i].type);
 574		i++;
 575	}
 576
 577	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
 578
 579	i = ARRAY_SIZE(test_data) - 1;
 580	tb_for_each_port_on_path(dst_port, src_port, p) {
 581		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 582		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 583		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 584		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 585				test_data[i].type);
 586		i--;
 587	}
 588
 589	KUNIT_EXPECT_EQ(test, i, -1);
 590}
 591
 592static void tb_test_path_simple_tree_walk(struct kunit *test)
 593{
 594	/*
 595	 * Walks from Host DP IN to Device #3 DP OUT.
 596	 *
 597	 *           [Host]
 598	 *            1 |
 599	 *            1 |
 600	 *         [Device #1]
 601	 *       3 /   | 5  \ 7
 602	 *      1 /    |     \ 1
 603	 * [Device #2] |    [Device #4]
 604	 *             | 1
 605	 *         [Device #3]
 606	 */
 607	static const struct port_expectation test_data[] = {
 608		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
 609		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
 610		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
 611		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
 612		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
 613		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
 614	};
 615	struct tb_port *src_port, *dst_port, *p;
 616	struct tb_switch *host, *dev1, *dev3;
 617	int i;
 618
 619	host = alloc_host(test);
 620	dev1 = alloc_dev_default(test, host, 0x1, true);
 621	alloc_dev_default(test, dev1, 0x301, true);
 622	dev3 = alloc_dev_default(test, dev1, 0x501, true);
 623	alloc_dev_default(test, dev1, 0x701, true);
 624
 625	src_port = &host->ports[5];
 626	dst_port = &dev3->ports[13];
 627
 628	/* Walk both directions */
 629
 630	i = 0;
 631	tb_for_each_port_on_path(src_port, dst_port, p) {
 632		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 633		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 634		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 635		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 636				test_data[i].type);
 637		i++;
 638	}
 639
 640	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
 641
 642	i = ARRAY_SIZE(test_data) - 1;
 643	tb_for_each_port_on_path(dst_port, src_port, p) {
 644		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 645		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 646		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 647		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 648				test_data[i].type);
 649		i--;
 650	}
 651
 652	KUNIT_EXPECT_EQ(test, i, -1);
 653}
 654
 655static void tb_test_path_complex_tree_walk(struct kunit *test)
 656{
 657	/*
 658	 * Walks from Device #3 DP IN to Device #9 DP OUT.
 659	 *
 660	 *           [Host]
 661	 *            1 |
 662	 *            1 |
 663	 *         [Device #1]
 664	 *       3 /   | 5  \ 7
 665	 *      1 /    |     \ 1
 666	 * [Device #2] |    [Device #5]
 667	 *    5 |      | 1         \ 7
 668	 *    1 |  [Device #4]      \ 1
 669	 * [Device #3]             [Device #6]
 670	 *                       3 /
 671	 *                      1 /
 672	 *                    [Device #7]
 673	 *                  3 /      | 5
 674	 *                 1 /       |
 675	 *               [Device #8] | 1
 676	 *                       [Device #9]
 677	 */
 678	static const struct port_expectation test_data[] = {
 679		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
 680		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
 681		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
 682		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
 683		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
 684		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
 685		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
 686		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
 687		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
 688		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
 689		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
 690		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
 691		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
 692		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
 693	};
 694	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
 695	struct tb_port *src_port, *dst_port, *p;
 696	int i;
 697
 698	host = alloc_host(test);
 699	dev1 = alloc_dev_default(test, host, 0x1, true);
 700	dev2 = alloc_dev_default(test, dev1, 0x301, true);
 701	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
 702	alloc_dev_default(test, dev1, 0x501, true);
 703	dev5 = alloc_dev_default(test, dev1, 0x701, true);
 704	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
 705	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
 706	alloc_dev_default(test, dev7, 0x303070701, true);
 707	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
 708
 709	src_port = &dev3->ports[13];
 710	dst_port = &dev9->ports[14];
 711
 712	/* Walk both directions */
 713
 714	i = 0;
 715	tb_for_each_port_on_path(src_port, dst_port, p) {
 716		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 717		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 718		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 719		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 720				test_data[i].type);
 721		i++;
 722	}
 723
 724	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
 725
 726	i = ARRAY_SIZE(test_data) - 1;
 727	tb_for_each_port_on_path(dst_port, src_port, p) {
 728		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 729		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 730		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 731		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 732				test_data[i].type);
 733		i--;
 734	}
 735
 736	KUNIT_EXPECT_EQ(test, i, -1);
 737}
 738
 739static void tb_test_path_max_length_walk(struct kunit *test)
 740{
 741	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
 742	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
 743	struct tb_port *src_port, *dst_port, *p;
 744	int i;
 745
 746	/*
 747	 * Walks from Device #6 DP IN to Device #12 DP OUT.
 748	 *
 749	 *          [Host]
 750	 *         1 /  \ 3
 751	 *        1 /    \ 1
 752	 * [Device #1]   [Device #7]
 753	 *     3 |           | 3
 754	 *     1 |           | 1
 755	 * [Device #2]   [Device #8]
 756	 *     3 |           | 3
 757	 *     1 |           | 1
 758	 * [Device #3]   [Device #9]
 759	 *     3 |           | 3
 760	 *     1 |           | 1
 761	 * [Device #4]   [Device #10]
 762	 *     3 |           | 3
 763	 *     1 |           | 1
 764	 * [Device #5]   [Device #11]
 765	 *     3 |           | 3
 766	 *     1 |           | 1
 767	 * [Device #6]   [Device #12]
 768	 */
 769	static const struct port_expectation test_data[] = {
 770		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
 771		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
 772		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
 773		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
 774		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
 775		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
 776		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
 777		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
 778		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
 779		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
 780		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
 781		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
 782		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
 783		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
 784		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
 785		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
 786		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
 787		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
 788		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
 789		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
 790		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
 791		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
 792		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
 793		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
 794		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
 795		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
 796	};
 797
 798	host = alloc_host(test);
 799	dev1 = alloc_dev_default(test, host, 0x1, true);
 800	dev2 = alloc_dev_default(test, dev1, 0x301, true);
 801	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
 802	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
 803	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
 804	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
 805	dev7 = alloc_dev_default(test, host, 0x3, true);
 806	dev8 = alloc_dev_default(test, dev7, 0x303, true);
 807	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
 808	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
 809	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
 810	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
 811
 812	src_port = &dev6->ports[13];
 813	dst_port = &dev12->ports[13];
 814
 815	/* Walk both directions */
 816
 817	i = 0;
 818	tb_for_each_port_on_path(src_port, dst_port, p) {
 819		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 820		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 821		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 822		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 823				test_data[i].type);
 824		i++;
 825	}
 826
 827	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
 828
 829	i = ARRAY_SIZE(test_data) - 1;
 830	tb_for_each_port_on_path(dst_port, src_port, p) {
 831		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
 832		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
 833		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
 834		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
 835				test_data[i].type);
 836		i--;
 837	}
 838
 839	KUNIT_EXPECT_EQ(test, i, -1);
 840}
 841
 842static void tb_test_path_not_connected(struct kunit *test)
 843{
 844	struct tb_switch *host, *dev1, *dev2;
 845	struct tb_port *down, *up;
 846	struct tb_path *path;
 847
 848	host = alloc_host(test);
 849	dev1 = alloc_dev_default(test, host, 0x3, false);
 850	/* Not connected to anything */
 851	dev2 = alloc_dev_default(test, NULL, 0x303, false);
 852
 853	down = &dev1->ports[10];
 854	up = &dev2->ports[9];
 855
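	/* The 0 and 1 below select the lane; neither can reach the detached dev2 */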
 856	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
 857	KUNIT_ASSERT_NULL(test, path);
 858	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
 859	KUNIT_ASSERT_NULL(test, path);
 860}
 861
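/* Expected ingress/egress adapter pair for a single path hop */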
 862struct hop_expectation {
 863	u64 route;
 864	u8 in_port;
 865	enum tb_port_type in_type;
 866	u8 out_port;
 867	enum tb_port_type out_type;
 868};
 869
 870static void tb_test_path_not_bonded_lane0(struct kunit *test)
 871{
 872	/*
 873	 * PCIe path from host to device using lane 0.
 874	 *
 875	 *   [Host]
 876	 *   3 |: 4
 877	 *   1 |: 2
 878	 *  [Device]
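	 *
	 * (The ':' in the diagram marks the unbonded lane the path does not use.)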
 879	 */
 880	static const struct hop_expectation test_data[] = {
 881		{
 882			.route = 0x0,
 883			.in_port = 9,
 884			.in_type = TB_TYPE_PCIE_DOWN,
 885			.out_port = 3,
 886			.out_type = TB_TYPE_PORT,
 887		},
 888		{
 889			.route = 0x3,
 890			.in_port = 1,
 891			.in_type = TB_TYPE_PORT,
 892			.out_port = 9,
 893			.out_type = TB_TYPE_PCIE_UP,
 894		},
 895	};
 896	struct tb_switch *host, *dev;
 897	struct tb_port *down, *up;
 898	struct tb_path *path;
 899	int i;
 900
 901	host = alloc_host(test);
 902	dev = alloc_dev_default(test, host, 0x3, false);
 903
 904	down = &host->ports[9];
 905	up = &dev->ports[9];
 906
 907	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
 908	KUNIT_ASSERT_NOT_NULL(test, path);
 909	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
 910	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
 911		const struct tb_port *in_port, *out_port;
 912
 913		in_port = path->hops[i].in_port;
 914		out_port = path->hops[i].out_port;
 915
 916		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
 917		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
 918		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
 919				test_data[i].in_type);
 920		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
 921		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
 922		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
 923				test_data[i].out_type);
 924	}
 925	tb_path_free(path);
 926}
 927
 928static void tb_test_path_not_bonded_lane1(struct kunit *test)
 929{
 930	/*
 931	 * DP Video path from host to device using lane 1. Paths like
 932	 * these are only used with Thunderbolt 1 devices where lane
 933	 * bonding is not possible. USB4 specifically does not allow
 934	 * paths like this (you either use lane 0 where lane 1 is
 935	 * disabled or both lanes are bonded).
 936	 *
 937	 *   [Host]
 938	 *   1 :| 2
 939	 *   1 :| 2
 940	 *  [Device]
 941	 */
 942	static const struct hop_expectation test_data[] = {
 943		{
 944			.route = 0x0,
 945			.in_port = 5,
 946			.in_type = TB_TYPE_DP_HDMI_IN,
 947			.out_port = 2,
 948			.out_type = TB_TYPE_PORT,
 949		},
 950		{
 951			.route = 0x1,
 952			.in_port = 2,
 953			.in_type = TB_TYPE_PORT,
 954			.out_port = 13,
 955			.out_type = TB_TYPE_DP_HDMI_OUT,
 956		},
 957	};
 958	struct tb_switch *host, *dev;
 959	struct tb_port *in, *out;
 960	struct tb_path *path;
 961	int i;
 962
 963	host = alloc_host(test);
 964	dev = alloc_dev_default(test, host, 0x1, false);
 965
 966	in = &host->ports[5];
 967	out = &dev->ports[13];
 968
 969	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
 970	KUNIT_ASSERT_NOT_NULL(test, path);
 971	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
 972	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
 973		const struct tb_port *in_port, *out_port;
 974
 975		in_port = path->hops[i].in_port;
 976		out_port = path->hops[i].out_port;
 977
 978		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
 979		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
 980		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
 981				test_data[i].in_type);
 982		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
 983		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
 984		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
 985				test_data[i].out_type);
 986	}
 987	tb_path_free(path);
 988}
 989
 990static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
 991{
 992	/*
 993	 * DP Video path from host to device 3 using lane 1.
 994	 *
 995	 *    [Host]
 996	 *    1 :| 2
 997	 *    1 :| 2
 998	 *  [Device #1]
 999	 *    7 :| 8
1000	 *    1 :| 2
1001	 *  [Device #2]
1002	 *    5 :| 6
1003	 *    1 :| 2
1004	 *  [Device #3]
1005	 */
1006	static const struct hop_expectation test_data[] = {
1007		{
1008			.route = 0x0,
1009			.in_port = 5,
1010			.in_type = TB_TYPE_DP_HDMI_IN,
1011			.out_port = 2,
1012			.out_type = TB_TYPE_PORT,
1013		},
1014		{
1015			.route = 0x1,
1016			.in_port = 2,
1017			.in_type = TB_TYPE_PORT,
1018			.out_port = 8,
1019			.out_type = TB_TYPE_PORT,
1020		},
1021		{
1022			.route = 0x701,
1023			.in_port = 2,
1024			.in_type = TB_TYPE_PORT,
1025			.out_port = 6,
1026			.out_type = TB_TYPE_PORT,
1027		},
1028		{
1029			.route = 0x50701,
1030			.in_port = 2,
1031			.in_type = TB_TYPE_PORT,
1032			.out_port = 13,
1033			.out_type = TB_TYPE_DP_HDMI_OUT,
1034		},
1035	};
1036	struct tb_switch *host, *dev1, *dev2, *dev3;
1037	struct tb_port *in, *out;
1038	struct tb_path *path;
1039	int i;
1040
1041	host = alloc_host(test);
1042	dev1 = alloc_dev_default(test, host, 0x1, false);
1043	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1044	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1045
1046	in = &host->ports[5];
1047	out = &dev3->ports[13];
1048
1049	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1050	KUNIT_ASSERT_NOT_NULL(test, path);
1051	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1052	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1053		const struct tb_port *in_port, *out_port;
1054
1055		in_port = path->hops[i].in_port;
1056		out_port = path->hops[i].out_port;
1057
1058		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1059		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1060		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1061				test_data[i].in_type);
1062		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1063		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1064		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1065				test_data[i].out_type);
1066	}
1067	tb_path_free(path);
1068}
1069
1070static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
1071{
1072	/*
1073	 * DP Video path from device 3 to host using lane 1.
1074	 *
1075	 *    [Host]
1076	 *    1 :| 2
1077	 *    1 :| 2
1078	 *  [Device #1]
1079	 *    7 :| 8
1080	 *    1 :| 2
1081	 *  [Device #2]
1082	 *    5 :| 6
1083	 *    1 :| 2
1084	 *  [Device #3]
1085	 */
1086	static const struct hop_expectation test_data[] = {
1087		{
1088			.route = 0x50701,
1089			.in_port = 13,
1090			.in_type = TB_TYPE_DP_HDMI_IN,
1091			.out_port = 2,
1092			.out_type = TB_TYPE_PORT,
1093		},
1094		{
1095			.route = 0x701,
1096			.in_port = 6,
1097			.in_type = TB_TYPE_PORT,
1098			.out_port = 2,
1099			.out_type = TB_TYPE_PORT,
1100		},
1101		{
1102			.route = 0x1,
1103			.in_port = 8,
1104			.in_type = TB_TYPE_PORT,
1105			.out_port = 2,
1106			.out_type = TB_TYPE_PORT,
1107		},
1108		{
1109			.route = 0x0,
1110			.in_port = 2,
1111			.in_type = TB_TYPE_PORT,
1112			.out_port = 5,
1113			.out_type = TB_TYPE_DP_HDMI_IN,
1114		},
1115	};
1116	struct tb_switch *host, *dev1, *dev2, *dev3;
1117	struct tb_port *in, *out;
1118	struct tb_path *path;
1119	int i;
1120
1121	host = alloc_host(test);
1122	dev1 = alloc_dev_default(test, host, 0x1, false);
1123	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1124	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
1125
1126	in = &dev3->ports[13];
1127	out = &host->ports[5];
1128
1129	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1130	KUNIT_ASSERT_NOT_NULL(test, path);
1131	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1132	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1133		const struct tb_port *in_port, *out_port;
1134
1135		in_port = path->hops[i].in_port;
1136		out_port = path->hops[i].out_port;
1137
1138		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1139		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1140		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1141				test_data[i].in_type);
1142		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1143		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1144		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1145				test_data[i].out_type);
1146	}
1147	tb_path_free(path);
1148}
1149
1150static void tb_test_path_mixed_chain(struct kunit *test)
1151{
1152	/*
	 * DP Video path from host to device 4 where the first and last
	 * links are bonded.
1155	 *
1156	 *    [Host]
1157	 *    1 |
1158	 *    1 |
1159	 *  [Device #1]
1160	 *    7 :| 8
1161	 *    1 :| 2
1162	 *  [Device #2]
1163	 *    5 :| 6
1164	 *    1 :| 2
1165	 *  [Device #3]
1166	 *    3 |
1167	 *    1 |
1168	 *  [Device #4]
1169	 */
1170	static const struct hop_expectation test_data[] = {
1171		{
1172			.route = 0x0,
1173			.in_port = 5,
1174			.in_type = TB_TYPE_DP_HDMI_IN,
1175			.out_port = 1,
1176			.out_type = TB_TYPE_PORT,
1177		},
1178		{
1179			.route = 0x1,
1180			.in_port = 1,
1181			.in_type = TB_TYPE_PORT,
1182			.out_port = 8,
1183			.out_type = TB_TYPE_PORT,
1184		},
1185		{
1186			.route = 0x701,
1187			.in_port = 2,
1188			.in_type = TB_TYPE_PORT,
1189			.out_port = 6,
1190			.out_type = TB_TYPE_PORT,
1191		},
1192		{
1193			.route = 0x50701,
1194			.in_port = 2,
1195			.in_type = TB_TYPE_PORT,
1196			.out_port = 3,
1197			.out_type = TB_TYPE_PORT,
1198		},
1199		{
1200			.route = 0x3050701,
1201			.in_port = 1,
1202			.in_type = TB_TYPE_PORT,
1203			.out_port = 13,
1204			.out_type = TB_TYPE_DP_HDMI_OUT,
1205		},
1206	};
1207	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1208	struct tb_port *in, *out;
1209	struct tb_path *path;
1210	int i;
1211
1212	host = alloc_host(test);
1213	dev1 = alloc_dev_default(test, host, 0x1, true);
1214	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1215	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1216	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1217
1218	in = &host->ports[5];
1219	out = &dev4->ports[13];
1220
1221	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1222	KUNIT_ASSERT_NOT_NULL(test, path);
1223	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1224	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1225		const struct tb_port *in_port, *out_port;
1226
1227		in_port = path->hops[i].in_port;
1228		out_port = path->hops[i].out_port;
1229
1230		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1231		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1232		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1233				test_data[i].in_type);
1234		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1235		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1236		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1237				test_data[i].out_type);
1238	}
1239	tb_path_free(path);
1240}
1241
1242static void tb_test_path_mixed_chain_reverse(struct kunit *test)
1243{
1244	/*
	 * DP Video path from device 4 to host where the first and last
	 * links are bonded.
1247	 *
1248	 *    [Host]
1249	 *    1 |
1250	 *    1 |
1251	 *  [Device #1]
1252	 *    7 :| 8
1253	 *    1 :| 2
1254	 *  [Device #2]
1255	 *    5 :| 6
1256	 *    1 :| 2
1257	 *  [Device #3]
1258	 *    3 |
1259	 *    1 |
1260	 *  [Device #4]
1261	 */
1262	static const struct hop_expectation test_data[] = {
1263		{
1264			.route = 0x3050701,
1265			.in_port = 13,
1266			.in_type = TB_TYPE_DP_HDMI_OUT,
1267			.out_port = 1,
1268			.out_type = TB_TYPE_PORT,
1269		},
1270		{
1271			.route = 0x50701,
1272			.in_port = 3,
1273			.in_type = TB_TYPE_PORT,
1274			.out_port = 2,
1275			.out_type = TB_TYPE_PORT,
1276		},
1277		{
1278			.route = 0x701,
1279			.in_port = 6,
1280			.in_type = TB_TYPE_PORT,
1281			.out_port = 2,
1282			.out_type = TB_TYPE_PORT,
1283		},
1284		{
1285			.route = 0x1,
1286			.in_port = 8,
1287			.in_type = TB_TYPE_PORT,
1288			.out_port = 1,
1289			.out_type = TB_TYPE_PORT,
1290		},
1291		{
1292			.route = 0x0,
1293			.in_port = 1,
1294			.in_type = TB_TYPE_PORT,
1295			.out_port = 5,
1296			.out_type = TB_TYPE_DP_HDMI_IN,
1297		},
1298	};
1299	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1300	struct tb_port *in, *out;
1301	struct tb_path *path;
1302	int i;
1303
1304	host = alloc_host(test);
1305	dev1 = alloc_dev_default(test, host, 0x1, true);
1306	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1307	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1308	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1309
1310	in = &dev4->ports[13];
1311	out = &host->ports[5];
1312
1313	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1314	KUNIT_ASSERT_NOT_NULL(test, path);
1315	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1316	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1317		const struct tb_port *in_port, *out_port;
1318
1319		in_port = path->hops[i].in_port;
1320		out_port = path->hops[i].out_port;
1321
1322		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1323		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1324		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1325				test_data[i].in_type);
1326		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1327		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1328		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1329				test_data[i].out_type);
1330	}
1331	tb_path_free(path);
1332}
1333
1334static void tb_test_tunnel_pcie(struct kunit *test)
1335{
1336	struct tb_switch *host, *dev1, *dev2;
1337	struct tb_tunnel *tunnel1, *tunnel2;
1338	struct tb_port *down, *up;
1339
1340	/*
	 * Create PCIe tunnels between the host and two devices.
1342	 *
1343	 *   [Host]
1344	 *    1 |
1345	 *    1 |
1346	 *  [Device #1]
1347	 *    5 |
1348	 *    1 |
1349	 *  [Device #2]
1350	 */
1351	host = alloc_host(test);
1352	dev1 = alloc_dev_default(test, host, 0x1, true);
1353	dev2 = alloc_dev_default(test, dev1, 0x501, true);
1354
1355	down = &host->ports[8];
1356	up = &dev1->ports[9];
1357	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
1358	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1359	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
1360	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1361	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1362	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1363	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1364	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1365	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1366	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1367	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1368	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1369
1370	down = &dev1->ports[10];
1371	up = &dev2->ports[9];
1372	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
1373	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1374	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
1375	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1376	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1377	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1378	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1379	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1380	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1381	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1382	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1383	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1384
1385	tb_tunnel_free(tunnel2);
1386	tb_tunnel_free(tunnel1);
1387}
1388
1389static void tb_test_tunnel_dp(struct kunit *test)
1390{
1391	struct tb_switch *host, *dev;
1392	struct tb_port *in, *out;
1393	struct tb_tunnel *tunnel;
1394
1395	/*
1396	 * Create DP tunnel between Host and Device
1397	 *
1398	 *   [Host]
1399	 *   1 |
1400	 *   1 |
1401	 *  [Device]
1402	 */
1403	host = alloc_host(test);
1404	dev = alloc_dev_default(test, host, 0x3, true);
1405
1406	in = &host->ports[5];
1407	out = &dev->ports[13];
1408
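	/* 1, 0, 0: assumed to be link_nr 1 and no up-/downstream bandwidth limit */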
1409	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1410	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1411	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1412	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1413	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1414	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
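	/* Paths 0 and 1 run IN -> OUT (video and AUX TX), path 2 is AUX RX */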
1415	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
1416	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1417	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
1418	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
1419	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1420	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
1421	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
1422	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1423	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
1424	tb_tunnel_free(tunnel);
1425}
1426
1427static void tb_test_tunnel_dp_chain(struct kunit *test)
1428{
1429	struct tb_switch *host, *dev1, *dev4;
1430	struct tb_port *in, *out;
1431	struct tb_tunnel *tunnel;
1432
1433	/*
1434	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
1435	 *
1436	 *           [Host]
1437	 *            1 |
1438	 *            1 |
1439	 *         [Device #1]
1440	 *       3 /   | 5  \ 7
1441	 *      1 /    |     \ 1
1442	 * [Device #2] |    [Device #4]
1443	 *             | 1
1444	 *         [Device #3]
1445	 */
1446	host = alloc_host(test);
1447	dev1 = alloc_dev_default(test, host, 0x1, true);
1448	alloc_dev_default(test, dev1, 0x301, true);
1449	alloc_dev_default(test, dev1, 0x501, true);
1450	dev4 = alloc_dev_default(test, dev1, 0x701, true);
1451
1452	in = &host->ports[5];
1453	out = &dev4->ports[14];
1454
1455	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1456	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1457	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1458	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1459	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1460	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1461	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1462	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1463	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
1464	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1465	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1466	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
1467	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
1468	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1469	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
1470	tb_tunnel_free(tunnel);
1471}
1472
1473static void tb_test_tunnel_dp_tree(struct kunit *test)
1474{
1475	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
1476	struct tb_port *in, *out;
1477	struct tb_tunnel *tunnel;
1478
1479	/*
1480	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
1481	 *
1482	 *          [Host]
1483	 *           3 |
1484	 *           1 |
1485	 *         [Device #1]
1486	 *       3 /   | 5  \ 7
1487	 *      1 /    |     \ 1
1488	 * [Device #2] |    [Device #4]
1489	 *             | 1
1490	 *         [Device #3]
1491	 *             | 5
1492	 *             | 1
1493	 *         [Device #5]
1494	 */
1495	host = alloc_host(test);
1496	dev1 = alloc_dev_default(test, host, 0x3, true);
1497	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1498	dev3 = alloc_dev_default(test, dev1, 0x503, true);
1499	alloc_dev_default(test, dev1, 0x703, true);
1500	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1501
1502	in = &dev2->ports[13];
1503	out = &dev5->ports[13];
1504
1505	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1506	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1507	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1508	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1509	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1510	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1511	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
1512	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1513	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
1514	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
1515	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1516	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
1517	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
1518	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1519	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
1520	tb_tunnel_free(tunnel);
1521}
1522
1523static void tb_test_tunnel_dp_max_length(struct kunit *test)
1524{
1525	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
1526	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
1527	struct tb_port *in, *out;
1528	struct tb_tunnel *tunnel;
1529
1530	/*
	 * Create DP tunnel from Device #6 to Device #12.
1532	 *
1533	 *          [Host]
1534	 *         1 /  \ 3
1535	 *        1 /    \ 1
1536	 * [Device #1]   [Device #7]
1537	 *     3 |           | 3
1538	 *     1 |           | 1
1539	 * [Device #2]   [Device #8]
1540	 *     3 |           | 3
1541	 *     1 |           | 1
1542	 * [Device #3]   [Device #9]
1543	 *     3 |           | 3
1544	 *     1 |           | 1
1545	 * [Device #4]   [Device #10]
1546	 *     3 |           | 3
1547	 *     1 |           | 1
1548	 * [Device #5]   [Device #11]
1549	 *     3 |           | 3
1550	 *     1 |           | 1
1551	 * [Device #6]   [Device #12]
1552	 */
1553	host = alloc_host(test);
1554	dev1 = alloc_dev_default(test, host, 0x1, true);
1555	dev2 = alloc_dev_default(test, dev1, 0x301, true);
1556	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
1557	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
1558	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
1559	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
1560	dev7 = alloc_dev_default(test, host, 0x3, true);
1561	dev8 = alloc_dev_default(test, dev7, 0x303, true);
1562	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
1563	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
1564	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
1565	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
1566
1567	in = &dev6->ports[13];
1568	out = &dev12->ports[13];
1569
1570	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1571	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1572	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1573	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1574	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1575	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1576	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
1577	/* First hop */
1578	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1579	/* Middle */
1580	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
1581			    &host->ports[1]);
1582	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
1583			    &host->ports[3]);
1584	/* Last */
1585	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
1586	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
1587	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1588	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
1589			    &host->ports[1]);
1590	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
1591			    &host->ports[3]);
1592	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
1593	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
1594	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1595	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
1596			    &host->ports[3]);
1597	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
1598			    &host->ports[1]);
1599	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
1600	tb_tunnel_free(tunnel);
1601}
1602
1603static void tb_test_tunnel_3dp(struct kunit *test)
1604{
1605	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1606	struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
1607	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
1608
1609	/*
1610	 * Create 3 DP tunnels from Host to Devices #2, #5 and #4.
1611	 *
1612	 *          [Host]
1613	 *           3 |
1614	 *           1 |
1615	 *         [Device #1]
1616	 *       3 /   | 5  \ 7
1617	 *      1 /    |     \ 1
1618	 * [Device #2] |    [Device #4]
1619	 *             | 1
1620	 *         [Device #3]
1621	 *             | 5
1622	 *             | 1
1623	 *         [Device #5]
1624	 */
1625	host = alloc_host_br(test);
1626	dev1 = alloc_dev_default(test, host, 0x3, true);
1627	dev2 = alloc_dev_default(test, dev1, 0x303, true);
1628	dev3 = alloc_dev_default(test, dev1, 0x503, true);
1629	dev4 = alloc_dev_default(test, dev1, 0x703, true);
1630	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1631
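	/* The host from alloc_host_br() has a third DP IN adapter at port 10 */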
1632	in1 = &host->ports[5];
1633	in2 = &host->ports[6];
1634	in3 = &host->ports[10];
1635
1636	out1 = &dev2->ports[13];
1637	out2 = &dev5->ports[13];
1638	out3 = &dev4->ports[14];
1639
1640	tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1642	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
1643	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
1644	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
1645	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
1646	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
1647
1648	tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1650	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
1651	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
1652	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
1653	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
1654	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
1655
1656	tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel3);
1658	KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
1659	KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
1660	KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
1661	KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
1662	KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
1663
	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
1666}
1667
1668static void tb_test_tunnel_usb3(struct kunit *test)
1669{
1670	struct tb_switch *host, *dev1, *dev2;
1671	struct tb_tunnel *tunnel1, *tunnel2;
1672	struct tb_port *down, *up;
1673
1674	/*
	 * Create USB3 tunnels between the host and two devices.
1676	 *
1677	 *   [Host]
1678	 *    1 |
1679	 *    1 |
1680	 *  [Device #1]
1681	 *          \ 7
1682	 *           \ 1
1683	 *         [Device #2]
1684	 */
1685	host = alloc_host(test);
1686	dev1 = alloc_dev_default(test, host, 0x1, true);
1687	dev2 = alloc_dev_default(test, dev1, 0x701, true);
1688
1689	down = &host->ports[12];
1690	up = &dev1->ports[16];
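	/* The trailing 0, 0 are assumed to mean no up/down bandwidth limit */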
1691	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1692	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1693	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
1694	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1695	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1696	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1697	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1698	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1699	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1700	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1701	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1702	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1703
1704	down = &dev1->ports[17];
1705	up = &dev2->ports[16];
1706	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1707	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1708	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
1709	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1710	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1711	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1712	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1713	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1714	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1715	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1716	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1717	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1718
1719	tb_tunnel_free(tunnel2);
1720	tb_tunnel_free(tunnel1);
1721}
1722
1723static void tb_test_tunnel_port_on_path(struct kunit *test)
1724{
1725	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1726	struct tb_port *in, *out, *port;
1727	struct tb_tunnel *dp_tunnel;
1728
1729	/*
1730	 *          [Host]
1731	 *           3 |
1732	 *           1 |
1733	 *         [Device #1]
1734	 *       3 /   | 5  \ 7
1735	 *      1 /    |     \ 1
1736	 * [Device #2] |    [Device #4]
1737	 *             | 1
1738	 *         [Device #3]
1739	 *             | 5
1740	 *             | 1
1741	 *         [Device #5]
1742	 */
1743	host = alloc_host(test);
1744	dev1 = alloc_dev_default(test, host, 0x3, true);
1745	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1746	dev3 = alloc_dev_default(test, dev1, 0x503, true);
1747	dev4 = alloc_dev_default(test, dev1, 0x703, true);
1748	dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1749
1750	in = &dev2->ports[13];
1751	out = &dev5->ports[13];
1752
1753	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1754	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
1755
1756	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1757	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
1758
1759	port = &host->ports[8];
1760	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1761
1762	port = &host->ports[3];
1763	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1764
1765	port = &dev1->ports[1];
1766	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1767
1768	port = &dev1->ports[3];
1769	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1770
1771	port = &dev1->ports[5];
1772	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1773
1774	port = &dev1->ports[7];
1775	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1776
1777	port = &dev3->ports[1];
1778	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1779
1780	port = &dev5->ports[1];
1781	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1782
1783	port = &dev4->ports[1];
1784	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1785
1786	tb_tunnel_free(dp_tunnel);
1787}
1788
1789static void tb_test_tunnel_dma(struct kunit *test)
1790{
1791	struct tb_port *nhi, *port;
1792	struct tb_tunnel *tunnel;
1793	struct tb_switch *host;
1794
1795	/*
1796	 * Create DMA tunnel from NHI to port 1 and back.
1797	 *
1798	 *   [Host 1]
1799	 *    1 ^ In HopID 1 -> Out HopID 8
1800	 *      |
1801	 *      v In HopID 8 -> Out HopID 1
1802	 * ............ Domain border
1803	 *      |
1804	 *   [Host 2]
1805	 */
1806	host = alloc_host(test);
1807	nhi = &host->ports[7];
1808	port = &host->ports[1];
1809
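	/*
	 * 8, 1, 8, 1: transmit path HopID, transmit ring, receive path
	 * HopID and receive ring, matching the diagram above.
	 */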
1810	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1811	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1812	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1813	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1814	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1815	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1816	/* RX path */
1817	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1818	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1819	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1820	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1821	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
1822	/* TX path */
1823	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
1824	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1825	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1826	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
1827	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
1828
1829	tb_tunnel_free(tunnel);
1830}
1831
1832static void tb_test_tunnel_dma_rx(struct kunit *test)
1833{
1834	struct tb_port *nhi, *port;
1835	struct tb_tunnel *tunnel;
1836	struct tb_switch *host;
1837
1838	/*
1839	 * Create DMA RX tunnel from port 1 to NHI.
1840	 *
1841	 *   [Host 1]
1842	 *    1 ^
1843	 *      |
1844	 *      | In HopID 15 -> Out HopID 2
1845	 * ............ Domain border
1846	 *      |
1847	 *   [Host 2]
1848	 */
1849	host = alloc_host(test);
1850	nhi = &host->ports[7];
1851	port = &host->ports[1];
1852
1853	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
1854	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1855	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1856	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1857	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1858	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1859	/* RX path */
1860	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1861	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1862	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
1863	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1864	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
1865
1866	tb_tunnel_free(tunnel);
1867}
1868
1869static void tb_test_tunnel_dma_tx(struct kunit *test)
1870{
1871	struct tb_port *nhi, *port;
1872	struct tb_tunnel *tunnel;
1873	struct tb_switch *host;
1874
1875	/*
1876	 * Create DMA TX tunnel from NHI to port 1.
1877	 *
1878	 *   [Host 1]
1879	 *    1 | In HopID 2 -> Out HopID 15
1880	 *      |
1881	 *      v
1882	 * ............ Domain border
1883	 *      |
1884	 *   [Host 2]
1885	 */
1886	host = alloc_host(test);
1887	nhi = &host->ports[7];
1888	port = &host->ports[1];
1889
1890	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
1891	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1892	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1893	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1894	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1895	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1896	/* TX path */
1897	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1898	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
1899	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
1900	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
1901	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
1902
1903	tb_tunnel_free(tunnel);
1904}
1905
1906static void tb_test_tunnel_dma_chain(struct kunit *test)
1907{
1908	struct tb_switch *host, *dev1, *dev2;
1909	struct tb_port *nhi, *port;
1910	struct tb_tunnel *tunnel;
1911
1912	/*
1913	 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1914	 *
1915	 *   [Host 1]
1916	 *    1 ^ In HopID 1 -> Out HopID x
1917	 *      |
1918	 *    1 | In HopID x -> Out HopID 1
1919	 *  [Device #1]
1920	 *         7 \
1921	 *          1 \
1922	 *         [Device #2]
1923	 *           3 | In HopID x -> Out HopID 8
1924	 *             |
1925	 *             v In HopID 8 -> Out HopID x
1926	 * ............ Domain border
1927	 *             |
1928	 *          [Host 2]
1929	 */
1930	host = alloc_host(test);
1931	dev1 = alloc_dev_default(test, host, 0x1, true);
1932	dev2 = alloc_dev_default(test, dev1, 0x701, true);
1933
1934	nhi = &host->ports[7];
1935	port = &dev2->ports[3];
1936	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1937	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1938	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1939	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1940	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1941	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1942	/* RX path */
1943	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1944	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1945	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1946	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1947			    &dev2->ports[1]);
1948	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1949			    &dev1->ports[7]);
1950	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1951			    &dev1->ports[1]);
1952	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1953			    &host->ports[1]);
1954	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
1955	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
1956	/* TX path */
1957	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1958	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1959	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1960	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1961			    &dev1->ports[1]);
1962	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1963			    &dev1->ports[7]);
1964	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1965			    &dev2->ports[1]);
1966	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1967	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1968
1969	tb_tunnel_free(tunnel);
1970}
1971
1972static void tb_test_tunnel_dma_match(struct kunit *test)
1973{
1974	struct tb_port *nhi, *port;
1975	struct tb_tunnel *tunnel;
1976	struct tb_switch *host;
1977
1978	host = alloc_host(test);
1979	nhi = &host->ports[7];
1980	port = &host->ports[1];
1981
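	/*
	 * tb_tunnel_match_dma() treats negative arguments as wildcards,
	 * but a match still requires the tunnel to actually have the
	 * corresponding path.
	 */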
1982	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
1983	KUNIT_ASSERT_NOT_NULL(test, tunnel);
1984
1985	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1986	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
1987	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1988	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1989	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1990	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1991	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1992	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
1993	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1994	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
1995
1996	tb_tunnel_free(tunnel);
1997
1998	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
1999	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2000	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
2001	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
2002	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
2003	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
2004	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
2005	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
2006	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
2007
2008	tb_tunnel_free(tunnel);
2009
2010	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
2011	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2012	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
2013	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
2014	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
2015	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
2016	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
2017	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
2018	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
2019
2020	tb_tunnel_free(tunnel);
2021}
2022
2023static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
2024{
2025	struct tb_switch *host, *dev;
2026	struct tb_port *up, *down;
2027	struct tb_tunnel *tunnel;
2028	struct tb_path *path;
2029
2030	host = alloc_host(test);
2031	dev = alloc_dev_default(test, host, 0x1, false);
2032
2033	down = &host->ports[8];
2034	up = &dev->ports[9];
2035	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2036	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2037	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2038
2039	path = tunnel->paths[0];
2040	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2041	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2042	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2043	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2044	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
2045
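	/* PCIe upstream path */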
2046	path = tunnel->paths[1];
2047	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2048	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2049	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2050	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2051	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
2052
2053	tb_tunnel_free(tunnel);
2054}
2055
2056static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
2057{
2058	struct tb_switch *host, *dev;
2059	struct tb_port *up, *down;
2060	struct tb_tunnel *tunnel;
2061	struct tb_path *path;
2062
2063	host = alloc_host(test);
2064	dev = alloc_dev_default(test, host, 0x1, true);
2065
2066	down = &host->ports[8];
2067	up = &dev->ports[9];
2068	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2069	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2070	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2071
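	/* PCIe downstream path */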
2072	path = tunnel->paths[0];
2073	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2074	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2075	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2076	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2077	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2078
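	/* PCIe upstream path */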
2079	path = tunnel->paths[1];
2080	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2081	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2082	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2083	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2084	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2085
2086	tb_tunnel_free(tunnel);
2087}
2088
2089static void tb_test_credit_alloc_pcie(struct kunit *test)
2090{
2091	struct tb_switch *host, *dev;
2092	struct tb_port *up, *down;
2093	struct tb_tunnel *tunnel;
2094	struct tb_path *path;
2095
2096	host = alloc_host_usb4(test);
2097	dev = alloc_dev_usb4(test, host, 0x1, true);
2098
2099	down = &host->ports[8];
2100	up = &dev->ports[9];
2101	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2102	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2103	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2104
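	/* PCIe downstream path */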
2105	path = tunnel->paths[0];
2106	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2107	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2108	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2109	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2110	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2111
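	/* PCIe upstream path */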
2112	path = tunnel->paths[1];
2113	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2114	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2115	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2116	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2117	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2118
2119	tb_tunnel_free(tunnel);
2120}
2121
2122static void tb_test_credit_alloc_without_dp(struct kunit *test)
2123{
2124	struct tb_switch *host, *dev;
2125	struct tb_port *up, *down;
2126	struct tb_tunnel *tunnel;
2127	struct tb_path *path;
2128
2129	host = alloc_host_usb4(test);
2130	dev = alloc_dev_without_dp(test, host, 0x1, true);
2131
2132	/*
2133	 * The device has no DP, therefore baMinDPmain = baMinDPaux = 0.
2134	 *
2135	 * Create a PCIe path that gets fewer buffers than baMaxPCIe.
2136	 *
2137	 * For a device with the following buffer configuration:
2138	 * baMaxUSB3 = 109
2139	 * baMinDPaux = 0
2140	 * baMinDPmain = 0
2141	 * baMaxPCIe = 30
2142	 * baMaxHI = 1
2143	 * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
2144	 * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
2145	 *		= Max(6, Min(30, 118 - 109)) = Max(6, 9) = 9
2146	 */
2147	down = &host->ports[8];
2148	up = &dev->ports[9];
2149	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2150	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2151	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2152
2153	/* PCIe downstream path */
2154	path = tunnel->paths[0];
2155	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2156	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2157	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2158	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2159	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
2160
2161	/* PCIe upstream path */
2162	path = tunnel->paths[1];
2163	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2164	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2165	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2166	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2167	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2168
2169	tb_tunnel_free(tunnel);
2170}
2171
2172static void tb_test_credit_alloc_dp(struct kunit *test)
2173{
2174	struct tb_switch *host, *dev;
2175	struct tb_port *in, *out;
2176	struct tb_tunnel *tunnel;
2177	struct tb_path *path;
2178
2179	host = alloc_host_usb4(test);
2180	dev = alloc_dev_usb4(test, host, 0x1, true);
2181
2182	in = &host->ports[5];
2183	out = &dev->ports[14];
2184
2185	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2186	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2187	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
2188
2189	/* Video (main) path */
2190	path = tunnel->paths[0];
2191	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2192	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2193	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2194	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2195	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2196
2197	/* AUX TX */
2198	path = tunnel->paths[1];
2199	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2200	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2201	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2202	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2203	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2204
2205	/* AUX RX */
2206	path = tunnel->paths[2];
2207	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2208	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2209	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2210	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2211	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2212
2213	tb_tunnel_free(tunnel);
2214}
2215
2216static void tb_test_credit_alloc_usb3(struct kunit *test)
2217{
2218	struct tb_switch *host, *dev;
2219	struct tb_port *up, *down;
2220	struct tb_tunnel *tunnel;
2221	struct tb_path *path;
2222
2223	host = alloc_host_usb4(test);
2224	dev = alloc_dev_usb4(test, host, 0x1, true);
2225
2226	down = &host->ports[12];
2227	up = &dev->ports[16];
2228	tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2229	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2230	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2231
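	/* USB3 downstream path */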
2232	path = tunnel->paths[0];
2233	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2234	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2235	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2236	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2237	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2238
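	/* USB3 upstream path */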
2239	path = tunnel->paths[1];
2240	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2241	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2242	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2243	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2244	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2245
2246	tb_tunnel_free(tunnel);
2247}
2248
2249static void tb_test_credit_alloc_dma(struct kunit *test)
2250{
2251	struct tb_switch *host, *dev;
2252	struct tb_port *nhi, *port;
2253	struct tb_tunnel *tunnel;
2254	struct tb_path *path;
2255
2256	host = alloc_host_usb4(test);
2257	dev = alloc_dev_usb4(test, host, 0x1, true);
2258
2259	nhi = &host->ports[7];
2260	port = &dev->ports[3];
2261
2262	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2263	KUNIT_ASSERT_NOT_NULL(test, tunnel);
2264	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2265
2266	/* DMA RX */
2267	path = tunnel->paths[0];
2268	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2269	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2270	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2271	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2272	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2273
2274	/* DMA TX */
2275	path = tunnel->paths[1];
2276	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2277	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2278	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2279	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2280	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2281
2282	tb_tunnel_free(tunnel);
2283}
2284
2285static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
2286{
2287	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
2288	struct tb_switch *host, *dev;
2289	struct tb_port *nhi, *port;
2290	struct tb_path *path;
2291
2292	host = alloc_host_usb4(test);
2293	dev = alloc_dev_usb4(test, host, 0x1, true);
2294
2295	nhi = &host->ports[7];
2296	port = &dev->ports[3];
2297
2298	/*
2299	 * Create three DMA tunnels through the same ports. With the
2300	 * default buffers we should be able to create two; the last
2301	 * one fails.
2302	 *
2303	 * For the default host we have the following buffers for DMA:
2304	 *
2305	 *   120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
2306	 *
2307	 * For the device we have the following:
2308	 *
2309	 *  120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
2310	 *
2311	 * spare = 14 + 1 = 15
2312	 *
2313	 * So on the host the first tunnel gets 14 credits, the second
2314	 * gets the remaining 1, and then we run out of buffers.
2315	 */
2316	tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2317	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
2318	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
2319
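	/* DMA RX */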
2320	path = tunnel1->paths[0];
2321	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2322	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2323	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2324	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2325	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2326
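	/* DMA TX */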
2327	path = tunnel1->paths[1];
2328	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2329	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2330	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2331	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2332	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2333
2334	tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2335	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
2336	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
2337
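	/* DMA RX */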
2338	path = tunnel2->paths[0];
2339	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2340	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2341	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2342	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2343	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2344
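	/* DMA TX */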
2345	path = tunnel2->paths[1];
2346	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2347	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2348	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2349	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2350	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2351
2352	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2353	KUNIT_ASSERT_NULL(test, tunnel3);
2354
2355	/*
2356	 * Release the first DMA tunnel. That should make 14 buffers
2357	 * available for the next tunnel.
2358	 */
2359	tb_tunnel_free(tunnel1);
2360
2361	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
2362	KUNIT_ASSERT_NOT_NULL(test, tunnel3);
2363
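	/* DMA RX */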
2364	path = tunnel3->paths[0];
2365	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2366	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2367	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2368	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2369	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2370
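	/* DMA TX */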
2371	path = tunnel3->paths[1];
2372	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2373	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2374	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2375	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2376	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2377
2378	tb_tunnel_free(tunnel3);
2379	tb_tunnel_free(tunnel2);
2380}
2381
2382static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
2383			struct tb_switch *host, struct tb_switch *dev)
2384{
2385	struct tb_port *up, *down;
2386	struct tb_tunnel *pcie_tunnel;
2387	struct tb_path *path;
2388
2389	down = &host->ports[8];
2390	up = &dev->ports[9];
2391	pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2392	KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
2393	KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
2394
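	/* PCIe downstream path */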
2395	path = pcie_tunnel->paths[0];
2396	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2397	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2398	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2399	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2400	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2401
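	/* PCIe upstream path */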
2402	path = pcie_tunnel->paths[1];
2403	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2404	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2405	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2406	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2407	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2408
2409	return pcie_tunnel;
2410}
2411
2412static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
2413			struct tb_switch *host, struct tb_switch *dev)
2414{
2415	struct tb_port *in, *out;
2416	struct tb_tunnel *dp_tunnel1;
2417	struct tb_path *path;
2418
2419	in = &host->ports[5];
2420	out = &dev->ports[13];
2421	dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2422	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
2423	KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
2424
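	/* Video (main) path */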
2425	path = dp_tunnel1->paths[0];
2426	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2427	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2428	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2429	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2430	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2431
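	/* AUX TX */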
2432	path = dp_tunnel1->paths[1];
2433	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2434	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2435	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2436	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2437	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2438
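	/* AUX RX */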
2439	path = dp_tunnel1->paths[2];
2440	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2441	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2442	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2443	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2444	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2445
2446	return dp_tunnel1;
2447}
2448
2449static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
2450			struct tb_switch *host, struct tb_switch *dev)
2451{
2452	struct tb_port *in, *out;
2453	struct tb_tunnel *dp_tunnel2;
2454	struct tb_path *path;
2455
2456	in = &host->ports[6];
2457	out = &dev->ports[14];
2458	dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2459	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
2460	KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
2461
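	/* Video (main) path */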
2462	path = dp_tunnel2->paths[0];
2463	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2464	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2465	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2466	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2467	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2468
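	/* AUX TX */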
2469	path = dp_tunnel2->paths[1];
2470	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2471	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2472	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2473	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2474	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2475
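	/* AUX RX */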
2476	path = dp_tunnel2->paths[2];
2477	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2478	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2479	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2480	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2481	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2482
2483	return dp_tunnel2;
2484}
2485
2486static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
2487			struct tb_switch *host, struct tb_switch *dev)
2488{
2489	struct tb_port *up, *down;
2490	struct tb_tunnel *usb3_tunnel;
2491	struct tb_path *path;
2492
2493	down = &host->ports[12];
2494	up = &dev->ports[16];
2495	usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2496	KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
2497	KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
2498
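	/* USB3 downstream path */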
2499	path = usb3_tunnel->paths[0];
2500	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2501	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2502	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2503	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2504	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2505
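	/* USB3 upstream path */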
2506	path = usb3_tunnel->paths[1];
2507	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2508	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2509	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2510	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2511	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2512
2513	return usb3_tunnel;
2514}
2515
2516static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
2517			struct tb_switch *host, struct tb_switch *dev)
2518{
2519	struct tb_port *nhi, *port;
2520	struct tb_tunnel *dma_tunnel1;
2521	struct tb_path *path;
2522
2523	nhi = &host->ports[7];
2524	port = &dev->ports[3];
2525	dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2526	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
2527	KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
2528
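	/* DMA RX */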
2529	path = dma_tunnel1->paths[0];
2530	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2531	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2532	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2533	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2534	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2535
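	/* DMA TX */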
2536	path = dma_tunnel1->paths[1];
2537	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2538	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2539	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2540	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2541	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2542
2543	return dma_tunnel1;
2544}
2545
2546static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
2547			struct tb_switch *host, struct tb_switch *dev)
2548{
2549	struct tb_port *nhi, *port;
2550	struct tb_tunnel *dma_tunnel2;
2551	struct tb_path *path;
2552
2553	nhi = &host->ports[7];
2554	port = &dev->ports[3];
2555	dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
2556	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
2557	KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
2558
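	/* DMA RX */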
2559	path = dma_tunnel2->paths[0];
2560	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2561	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2562	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2563	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2564	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2565
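	/* DMA TX */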
2566	path = dma_tunnel2->paths[1];
2567	KUNIT_ASSERT_EQ(test, path->path_length, 2);
2568	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2569	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2570	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2571	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2572
2573	return dma_tunnel2;
2574}
2575
2576static void tb_test_credit_alloc_all(struct kunit *test)
2577{
2578	struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
2579	struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
2580	struct tb_switch *host, *dev;
2581
2582	/*
2583	 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from the host to
2584	 * the device. The expectation is that all these can be established
2585	 * with the default credit allocation found in Intel hardware.
2586	 */
2587
2588	host = alloc_host_usb4(test);
2589	dev = alloc_dev_usb4(test, host, 0x1, true);
2590
2591	pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
2592	dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
2593	dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
2594	usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
2595	dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
2596	dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
2597
2598	tb_tunnel_free(dma_tunnel2);
2599	tb_tunnel_free(dma_tunnel1);
2600	tb_tunnel_free(usb3_tunnel);
2601	tb_tunnel_free(dp_tunnel2);
2602	tb_tunnel_free(dp_tunnel1);
2603	tb_tunnel_free(pcie_tunnel);
2604}
2605
2606static const u32 root_directory[] = {
2607	0x55584401,	/* "UXD" v1 */
2608	0x00000018,	/* Root directory length */
2609	0x76656e64,	/* "vend" */
2610	0x6f726964,	/* "orid" */
2611	0x76000001,	/* "v" R 1 */
2612	0x00000a27,	/* Immediate value, ! Vendor ID */
2613	0x76656e64,	/* "vend" */
2614	0x6f726964,	/* "orid" */
2615	0x74000003,	/* "t" R 3 */
2616	0x0000001a,	/* Text leaf offset, (“Apple Inc.”) */
2617	0x64657669,	/* "devi" */
2618	0x63656964,	/* "ceid" */
2619	0x76000001,	/* "v" R 1 */
2620	0x0000000a,	/* Immediate value, ! Device ID */
2621	0x64657669,	/* "devi" */
2622	0x63656964,	/* "ceid" */
2623	0x74000003,	/* "t" R 3 */
2624	0x0000001d,	/* Text leaf offset, (“Macintosh”) */
2625	0x64657669,	/* "devi" */
2626	0x63657276,	/* "cerv" */
2627	0x76000001,	/* "v" R 1 */
2628	0x80000100,	/* Immediate value, Device Revision */
2629	0x6e657477,	/* "netw" */
2630	0x6f726b00,	/* "ork" */
2631	0x44000014,	/* "D" R 20 */
2632	0x00000021,	/* Directory data offset, (Network Directory) */
2633	0x4170706c,	/* "Appl" */
2634	0x6520496e,	/* "e In" */
2635	0x632e0000,	/* "c." ! */
2636	0x4d616369,	/* "Maci" */
2637	0x6e746f73,	/* "ntos" */
2638	0x68000000,	/* "h" */
2639	0x00000000,	/* padding */
2640	0xca8961c6,	/* Directory UUID, Network Directory */
2641	0x9541ce1c,	/* Directory UUID, Network Directory */
2642	0x5949b8bd,	/* Directory UUID, Network Directory */
2643	0x4f5a5f2e,	/* Directory UUID, Network Directory */
2644	0x70727463,	/* "prtc" */
2645	0x69640000,	/* "id" */
2646	0x76000001,	/* "v" R 1 */
2647	0x00000001,	/* Immediate value, Network Protocol ID */
2648	0x70727463,	/* "prtc" */
2649	0x76657273,	/* "vers" */
2650	0x76000001,	/* "v" R 1 */
2651	0x00000001,	/* Immediate value, Network Protocol Version */
2652	0x70727463,	/* "prtc" */
2653	0x72657673,	/* "revs" */
2654	0x76000001,	/* "v" R 1 */
2655	0x00000001,	/* Immediate value, Network Protocol Revision */
2656	0x70727463,	/* "prtc" */
2657	0x73746e73,	/* "stns" */
2658	0x76000001,	/* "v" R 1 */
2659	0x00000000,	/* Immediate value, Network Protocol Settings */
2660};
2661
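/*
 * Same UUID that root_directory above stores (as little-endian u32s)
 * for the network directory.
 */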
2662static const uuid_t network_dir_uuid =
2663	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
2664		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
2665
2666static void tb_test_property_parse(struct kunit *test)
2667{
2668	struct tb_property_dir *dir, *network_dir;
2669	struct tb_property *p;
2670
2671	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2672	KUNIT_ASSERT_NOT_NULL(test, dir);
2673
2674	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
2675	KUNIT_ASSERT_NULL(test, p);
2676
2677	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
2678	KUNIT_ASSERT_NOT_NULL(test, p);
2679	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
2680
2681	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
2682	KUNIT_ASSERT_NOT_NULL(test, p);
2683	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
2684
2685	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2686	KUNIT_ASSERT_NOT_NULL(test, p);
2687	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
2688
2689	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2690	KUNIT_ASSERT_NOT_NULL(test, p);
2691	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
2692
2693	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
2694	KUNIT_ASSERT_NULL(test, p);
2695
2696	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
2697	KUNIT_ASSERT_NOT_NULL(test, p);
2698
2699	network_dir = p->value.dir;
2700	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
2701
2702	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
2703	KUNIT_ASSERT_NOT_NULL(test, p);
2704	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2705
2706	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
2707	KUNIT_ASSERT_NOT_NULL(test, p);
2708	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2709
2710	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
2711	KUNIT_ASSERT_NOT_NULL(test, p);
2712	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
2713
2714	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
2715	KUNIT_ASSERT_NOT_NULL(test, p);
2716	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
2717
2718	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
2719	KUNIT_EXPECT_NULL(test, p);
2720	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
2721	KUNIT_EXPECT_NULL(test, p);
2722
2723	tb_property_free_dir(dir);
2724}
2725
2726static void tb_test_property_format(struct kunit *test)
2727{
2728	struct tb_property_dir *dir;
2729	ssize_t block_len;
2730	u32 *block;
2731	int ret, i;
2732
2733	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2734	KUNIT_ASSERT_NOT_NULL(test, dir);
2735
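	/* With a NULL block, returns the number of dwords required */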
2736	ret = tb_property_format_dir(dir, NULL, 0);
2737	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2738
2739	block_len = ret;
2740
2741	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
2742	KUNIT_ASSERT_NOT_NULL(test, block);
2743
2744	ret = tb_property_format_dir(dir, block, block_len);
2745	KUNIT_EXPECT_EQ(test, ret, 0);
2746
2747	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2748		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2749
2750	tb_property_free_dir(dir);
2751}
2752
2753static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
2754			 struct tb_property_dir *d2)
2755{
2756	struct tb_property *p1, *p2, *tmp;
2757	int n1, n2, i;
2758
2759	if (d1->uuid) {
2760		KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
2761		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
2762	} else {
2763		KUNIT_ASSERT_NULL(test, d2->uuid);
2764	}
2765
2766	n1 = 0;
2767	tb_property_for_each(d1, tmp)
2768		n1++;
2769	KUNIT_ASSERT_NE(test, n1, 0);
2770
2771	n2 = 0;
2772	tb_property_for_each(d2, tmp)
2773		n2++;
2774	KUNIT_ASSERT_NE(test, n2, 0);
2775
2776	KUNIT_ASSERT_EQ(test, n1, n2);
2777
2778	p1 = NULL;
2779	p2 = NULL;
2780	for (i = 0; i < n1; i++) {
2781		p1 = tb_property_get_next(d1, p1);
2782		KUNIT_ASSERT_NOT_NULL(test, p1);
2783		p2 = tb_property_get_next(d2, p2);
2784		KUNIT_ASSERT_NOT_NULL(test, p2);
2785
2786		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
2787		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
2788		KUNIT_ASSERT_EQ(test, p1->length, p2->length);
2789
2790		switch (p1->type) {
2791		case TB_PROPERTY_TYPE_DIRECTORY:
2792			KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
2793			KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
2794			compare_dirs(test, p1->value.dir, p2->value.dir);
2795			break;
2796
2797		case TB_PROPERTY_TYPE_DATA:
2798			KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
2799			KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
2800			KUNIT_ASSERT_TRUE(test,
2801				!memcmp(p1->value.data, p2->value.data,
2802					p1->length * 4)
2803			);
2804			break;
2805
2806		case TB_PROPERTY_TYPE_TEXT:
2807			KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
2808			KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
2809			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
2810			break;
2811
2812		case TB_PROPERTY_TYPE_VALUE:
2813			KUNIT_ASSERT_EQ(test, p1->value.immediate,
2814					p2->value.immediate);
2815			break;
2816		default:
2817			KUNIT_FAIL(test, "unexpected property type");
2818			break;
2819		}
2820	}
2821}
2822
2823static void tb_test_property_copy(struct kunit *test)
2824{
2825	struct tb_property_dir *src, *dst;
2826	u32 *block;
2827	int ret, i;
2828
2829	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2830	KUNIT_ASSERT_NOT_NULL(test, src);
2831
2832	dst = tb_property_copy_dir(src);
2833	KUNIT_ASSERT_NOT_NULL(test, dst);
2834
2835	/* Compare the structures */
2836	compare_dirs(test, src, dst);
2837
2838	/* Compare the resulting property block */
2839	ret = tb_property_format_dir(dst, NULL, 0);
2840	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
2841
2842	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
2843	KUNIT_ASSERT_NOT_NULL(test, block);
2844
2845	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
2846	KUNIT_EXPECT_EQ(test, ret, 0);
2847
2848	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2849		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2850
2851	tb_property_free_dir(dst);
2852	tb_property_free_dir(src);
2853}
2854
2855static struct kunit_case tb_test_cases[] = {
2856	KUNIT_CASE(tb_test_path_basic),
2857	KUNIT_CASE(tb_test_path_not_connected_walk),
2858	KUNIT_CASE(tb_test_path_single_hop_walk),
2859	KUNIT_CASE(tb_test_path_daisy_chain_walk),
2860	KUNIT_CASE(tb_test_path_simple_tree_walk),
2861	KUNIT_CASE(tb_test_path_complex_tree_walk),
2862	KUNIT_CASE(tb_test_path_max_length_walk),
2863	KUNIT_CASE(tb_test_path_not_connected),
2864	KUNIT_CASE(tb_test_path_not_bonded_lane0),
2865	KUNIT_CASE(tb_test_path_not_bonded_lane1),
2866	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
2867	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
2868	KUNIT_CASE(tb_test_path_mixed_chain),
2869	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
2870	KUNIT_CASE(tb_test_tunnel_pcie),
2871	KUNIT_CASE(tb_test_tunnel_dp),
2872	KUNIT_CASE(tb_test_tunnel_dp_chain),
2873	KUNIT_CASE(tb_test_tunnel_dp_tree),
2874	KUNIT_CASE(tb_test_tunnel_dp_max_length),
2875	KUNIT_CASE(tb_test_tunnel_3dp),
2876	KUNIT_CASE(tb_test_tunnel_port_on_path),
2877	KUNIT_CASE(tb_test_tunnel_usb3),
2878	KUNIT_CASE(tb_test_tunnel_dma),
2879	KUNIT_CASE(tb_test_tunnel_dma_rx),
2880	KUNIT_CASE(tb_test_tunnel_dma_tx),
2881	KUNIT_CASE(tb_test_tunnel_dma_chain),
2882	KUNIT_CASE(tb_test_tunnel_dma_match),
2883	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
2884	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
2885	KUNIT_CASE(tb_test_credit_alloc_pcie),
2886	KUNIT_CASE(tb_test_credit_alloc_without_dp),
2887	KUNIT_CASE(tb_test_credit_alloc_dp),
2888	KUNIT_CASE(tb_test_credit_alloc_usb3),
2889	KUNIT_CASE(tb_test_credit_alloc_dma),
2890	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
2891	KUNIT_CASE(tb_test_credit_alloc_all),
2892	KUNIT_CASE(tb_test_property_parse),
2893	KUNIT_CASE(tb_test_property_format),
2894	KUNIT_CASE(tb_test_property_copy),
2895	{ }
2896};
2897
2898static struct kunit_suite tb_test_suite = {
2899	.name = "thunderbolt",
2900	.test_cases = tb_test_cases,
2901};
2902
2903kunit_test_suite(tb_test_suite);