// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

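/*
 * Registers the IDA as a KUnit-managed resource: __ida_init() runs now
 * and __ida_destroy() is called automatically when the test finishes,
 * so the tests below never clean up HopID allocations themselves.
 */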
static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}

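/*
 * Builds a mock switch (router) for the given route using KUnit-managed
 * allocations. Note that the route encodes the downstream port taken at
 * each hop, one byte per hop, least significant byte first: for example
 * route 0x50301 is reached through host port 1, then port 3, then port 5.
 */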
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	sw->config.max_port_number = max_port_number;

	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!sw->ports)
		return NULL;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}

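/*
 * Mock host router: lane adapters 1-4 (two dual-link pairs), DP IN
 * adapters 5 and 6, NHI 7, PCIe downstream 8 and 9, and USB3
 * downstream 12 and 13. Ports 10 and 11 are disabled.
 */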
static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x9a1b;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 7;
	sw->ports[0].config.max_out_hop_id = 7;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[5].config.max_in_hop_id = 9;
	sw->ports[5].config.max_out_hop_id = 9;
	sw->ports[5].cap_adap = -1;

	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[6].config.max_in_hop_id = 9;
	sw->ports[6].config.max_out_hop_id = 9;
	sw->ports[6].cap_adap = -1;

	sw->ports[7].config.type = TB_TYPE_NHI;
	sw->ports[7].config.max_in_hop_id = 11;
	sw->ports[7].config.max_out_hop_id = 11;
	sw->ports[7].config.nfc_credits = 0x41800000;

	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[8].config.max_in_hop_id = 8;
	sw->ports[8].config.max_out_hop_id = 8;

	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].disabled = true;
	sw->ports[11].disabled = true;

	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}

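/* Like alloc_host() but the host router is USB4 and supports credit allocation. */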
static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host(test);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 32;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 64;
	sw->max_dma_credits = 14;

	return sw;
}

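/* Like alloc_host_usb4() but with a third DP IN adapter on port 10. */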
static struct tb_switch *alloc_host_br(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host_usb4(test);
	if (!sw)
		return NULL;

	sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[10].config.max_in_hop_id = 9;
	sw->ports[10].config.max_out_hop_id = 9;
	sw->ports[10].cap_adap = -1;
	sw->ports[10].disabled = false;

	return sw;
}

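/*
 * Mock device router: lane adapters 1-8 (four dual-link pairs), PCIe
 * upstream 9 and downstream 10-12, DP OUT 13 and 14, USB3 upstream 16
 * and downstream 17-19. If @parent is given, the upstream lanes are
 * linked to the parent port that @route points to, optionally with the
 * lanes bonded (lane 1 credits folded into lane 0).
 */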
static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x15ef;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 8;
	sw->ports[0].config.max_out_hop_id = 8;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_PORT;
	sw->ports[5].config.max_in_hop_id = 19;
	sw->ports[5].config.max_out_hop_id = 19;
	sw->ports[5].total_credits = 60;
	sw->ports[5].ctl_credits = 2;
	sw->ports[5].dual_link_port = &sw->ports[6];

	sw->ports[6].config.type = TB_TYPE_PORT;
	sw->ports[6].config.max_in_hop_id = 19;
	sw->ports[6].config.max_out_hop_id = 19;
	sw->ports[6].total_credits = 60;
	sw->ports[6].ctl_credits = 2;
	sw->ports[6].dual_link_port = &sw->ports[5];
	sw->ports[6].link_nr = 1;

	sw->ports[7].config.type = TB_TYPE_PORT;
	sw->ports[7].config.max_in_hop_id = 19;
	sw->ports[7].config.max_out_hop_id = 19;
	sw->ports[7].total_credits = 60;
	sw->ports[7].ctl_credits = 2;
	sw->ports[7].dual_link_port = &sw->ports[8];

	sw->ports[8].config.type = TB_TYPE_PORT;
	sw->ports[8].config.max_in_hop_id = 19;
	sw->ports[8].config.max_out_hop_id = 19;
	sw->ports[8].total_credits = 60;
	sw->ports[8].ctl_credits = 2;
	sw->ports[8].dual_link_port = &sw->ports[7];
	sw->ports[8].link_nr = 1;

	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[10].config.max_in_hop_id = 8;
	sw->ports[10].config.max_out_hop_id = 8;

	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[11].config.max_in_hop_id = 8;
	sw->ports[11].config.max_out_hop_id = 8;

	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;
	sw->ports[13].cap_adap = -1;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;
	sw->ports[14].cap_adap = -1;

	sw->ports[15].disabled = true;

	sw->ports[16].config.type = TB_TYPE_USB3_UP;
	sw->ports[16].config.max_in_hop_id = 8;
	sw->ports[16].config.max_out_hop_id = 8;

	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[17].config.max_in_hop_id = 8;
	sw->ports[17].config.max_out_hop_id = 8;

	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[18].config.max_in_hop_id = 8;
	sw->ports[18].config.max_out_hop_id = 8;

	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[19].config.max_in_hop_id = 8;
	sw->ports[19].config.max_out_hop_id = 8;

	if (!parent)
		return sw;

	/* Link them */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;

		if (bonded) {
			/* Bonding is used */
			port->bonded = true;
			port->total_credits *= 2;
			port->dual_link_port->bonded = true;
			port->dual_link_port->total_credits = 0;
			upstream_port->bonded = true;
			upstream_port->total_credits *= 2;
			upstream_port->dual_link_port->bonded = true;
			upstream_port->dual_link_port->total_credits = 0;
		}
	}

	return sw;
}

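/* Like alloc_dev_default() but adapters 13 and 14 are DP IN instead of DP OUT. */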
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}

static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
					      struct tb_switch *parent,
					      u64 route, bool bonded)
{
	struct tb_switch *sw;
	int i;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;
	/*
	 * Device with:
	 * 2x USB4 Adapters (adapters 1,2 and 3,4),
	 * 1x PCIe Upstream (adapter 9),
	 * 1x PCIe Downstream (adapter 10),
	 * 1x USB3 Upstream (adapter 16),
	 * 1x USB3 Downstream (adapter 17)
	 */
	for (i = 5; i <= 8; i++)
		sw->ports[i].disabled = true;

	for (i = 11; i <= 14; i++)
		sw->ports[i].disabled = true;

	sw->ports[13].cap_adap = 0;
	sw->ports[14].cap_adap = 0;

	for (i = 18; i <= 19; i++)
		sw->ports[i].disabled = true;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 109;
	sw->min_dp_aux_credits = 0;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 30;
	sw->max_dma_credits = 1;

	return sw;
}

static struct tb_switch *alloc_dev_usb4(struct kunit *test,
					struct tb_switch *parent,
					u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 14;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 18;
	sw->max_pcie_credits = 32;
	sw->max_dma_credits = 14;

	return sw;
}

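/*
 * The walk tests below exercise tb_next_port_on_path(): passing NULL as
 * the previous port starts the walk, and the function returns NULL once
 * the destination port has been handed out.
 */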
static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

static void tb_test_path_not_connected_walk(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;

	host = alloc_host(test);
	/* No connection between host and dev */
	dev = alloc_dev_default(test, NULL, 3, true);

	src_port = &host->ports[12];
	dst_port = &dev->ports[16];

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, src_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);

	/* Other direction */

	p = tb_next_port_on_path(dst_port, src_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

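/* Expected router (identified by route), port number and type at each walk step. */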
struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 1, true);

	src_port = &host->ports[8];
	dst_port = &dev->ports[9];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #2 DP OUT.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  3 /
	 *  1 /
	 * [Device #2]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev2;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);

	src_port = &host->ports[5];
	dst_port = &dev2->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #3 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev3;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev1, 0x501, true);
	alloc_dev_default(test, dev1, 0x701, true);

	src_port = &host->ports[5];
	dst_port = &dev3->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #5]
	 *    5 |      | 1         \ 7
	 *    1 |  [Device #4]      \ 1
	 * [Device #3]             [Device #6]
	 *                       3 /
	 *                      1 /
	 *                    [Device #7]
	 *                  3 /  | 5
	 *                 1 /   |
	 *         [Device #8]   | 1
	 *                   [Device #9]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_not_connected(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *down, *up;
	struct tb_path *path;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
	dev2 = alloc_dev_default(test, NULL, 0x303, false);

	down = &dev1->ports[10];
	up = &dev2->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
}

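/* Expected ingress and egress adapter of a single path hop; @route identifies the router. */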
struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0.
	 *
	 * [Host]
	 *  3 |: 4
	 *  1 |: 2
	 * [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 9,
			.in_type = TB_TYPE_PCIE_DOWN,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 9,
			.out_type = TB_TYPE_PCIE_UP,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
	 *
	 * [Host]
	 *  1 :| 2
	 *  1 :| 2
	 * [Device]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1.
	 *
	 * [Host]
	 *  1 :| 2
	 *  1 :| 2
	 * [Device #1]
	 *  7 :| 8
	 *  1 :| 2
	 * [Device #2]
	 *  5 :| 6
	 *  1 :| 2
	 * [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 to host using lane 1.
	 *
	 * [Host]
	 *  1 :| 2
	 *  1 :| 2
	 * [Device #1]
	 *  7 :| 8
	 *  1 :| 2
	 * [Device #2]
	 *  5 :| 6
	 *  1 :| 2
	 * [Device #3]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x50701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where the first and last
	 * links are bonded.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  7 :| 8
	 *  1 :| 2
	 * [Device #2]
	 *  5 :| 6
	 *  1 :| 2
	 * [Device #3]
	 *  3 |
	 *  1 |
	 * [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x0,
			.in_port = 5,
			.in_type = TB_TYPE_DP_HDMI_IN,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 8,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 6,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 2,
			.in_type = TB_TYPE_PORT,
			.out_port = 3,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x3050701,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 13,
			.out_type = TB_TYPE_DP_HDMI_OUT,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 4 to host where the first and last
	 * links are bonded.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  7 :| 8
	 *  1 :| 2
	 * [Device #2]
	 *  5 :| 6
	 *  1 :| 2
	 * [Device #3]
	 *  3 |
	 *  1 |
	 * [Device #4]
	 */
	static const struct hop_expectation test_data[] = {
		{
			.route = 0x3050701,
			.in_port = 13,
			.in_type = TB_TYPE_DP_HDMI_OUT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x50701,
			.in_port = 3,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x701,
			.in_port = 6,
			.in_type = TB_TYPE_PORT,
			.out_port = 2,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x1,
			.in_port = 8,
			.in_type = TB_TYPE_PORT,
			.out_port = 1,
			.out_type = TB_TYPE_PORT,
		},
		{
			.route = 0x0,
			.in_port = 1,
			.in_type = TB_TYPE_PORT,
			.out_port = 5,
			.out_type = TB_TYPE_DP_HDMI_IN,
		},
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}
	tb_path_free(path);
}

static void tb_test_tunnel_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create PCIe tunnels between the host and two daisy-chained
	 * devices.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *  5 |
	 *  1 |
	 * [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x501, true);

	down = &host->ports[8];
	up = &dev1->ports[9];
	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[10];
	up = &dev2->ports[9];
	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

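/*
 * A DP tunnel consists of three paths: video (paths[0]), AUX TX
 * (paths[1]) and AUX RX (paths[2], going from DP OUT back to DP IN),
 * which is what the DP tunnel tests below expect.
 */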
static void tb_test_tunnel_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel between Host and Device
	 *
	 * [Host]
	 *  3 |
	 *  1 |
	 * [Device]
	 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, true);

	in = &host->ports[5];
	out = &dev->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 *
	 *           [Host]
	 *            1 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 *
	 *           [Host]
	 *            3 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 to Device #12.
	 *
	 *          [Host]
	 *         1 /  \ 3
	 *        1 /    \ 1
	 * [Device #1]   [Device #7]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #2]   [Device #8]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #3]   [Device #9]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #4]   [Device #10]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #5]   [Device #11]
	 *     3 |           | 3
	 *     1 |           | 1
	 * [Device #6]   [Device #12]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* Middle */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_3dp(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;

	/*
	 * Create 3 DP tunnels from Host to Devices #2, #5 and #4.
	 *
	 *           [Host]
	 *            3 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host_br(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_default(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in1 = &host->ports[5];
	in2 = &host->ports[6];
	in3 = &host->ports[10];

	out1 = &dev2->ports[13];
	out2 = &dev5->ports[13];
	out3 = &dev4->ports[14];

	tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);

	tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);

	tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel3);
	KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
	KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);

	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create USB3 tunnels between the host and two daisy-chained
	 * devices.
	 *
	 * [Host]
	 *  1 |
	 *  1 |
	 * [Device #1]
	 *          \ 7
	 *           \ 1
	 *         [Device #2]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	down = &host->ports[12];
	up = &dev1->ports[16];
	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[17];
	up = &dev2->ports[16];
	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_port_on_path(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in, *out, *port;
	struct tb_tunnel *dp_tunnel;

	/*
	 *           [Host]
	 *            3 |
	 *            1 |
	 *         [Device #1]
	 *       3 /   | 5  \ 7
	 *      1 /    |     \ 1
	 * [Device #2] |    [Device #4]
	 *             | 1
	 *         [Device #3]
	 *             | 5
	 *             | 1
	 *         [Device #5]
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);

	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));

	port = &host->ports[8];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &host->ports[3];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[3];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[5];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[7];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev3->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev5->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev4->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	tb_tunnel_free(dp_tunnel);
}

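/*
 * For the DMA tunnel tests below: the tb_tunnel_alloc_dma() arguments
 * after the two ports are transmit path HopID, transmit ring, receive
 * path HopID and receive ring; passing -1 leaves that direction out of
 * the tunnel.
 */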
1789static void tb_test_tunnel_dma(struct kunit *test)
1790{
1791 struct tb_port *nhi, *port;
1792 struct tb_tunnel *tunnel;
1793 struct tb_switch *host;
1794
1795 /*
1796 * Create DMA tunnel from NHI to port 1 and back.
1797 *
1798 * [Host 1]
1799 * 1 ^ In HopID 1 -> Out HopID 8
1800 * |
1801 * v In HopID 8 -> Out HopID 1
1802 * ............ Domain border
1803 * |
1804 * [Host 2]
1805 */
1806 host = alloc_host(test);
1807 nhi = &host->ports[7];
1808 port = &host->ports[1];
1809
	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_rx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA RX tunnel from port 1 to NHI.
	 *
	 *   [Host 1]
	 *    1 ^
	 *      |
	 *      | In HopID 15 -> Out HopID 2
	 * ............ Domain border
	 *      |
	 *   [Host 2]
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

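	/* Passing -1 for the transmit pair means no TX path is created. */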
	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_tx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA TX tunnel from NHI to port 1.
	 *
	 *   [Host 1]
	 *    1 | In HopID 2 -> Out HopID 15
	 *      |
	 *      v
	 * ............ Domain border
	 *      |
	 *   [Host 2]
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;

	/*
	 * Create DMA tunnel from NHI to Device #2 port 3 and back.
	 *
	 *   [Host 1]
	 *    1 ^ In HopID 1 -> Out HopID x
	 *      |
	 *    1 | In HopID x -> Out HopID 1
	 *  [Device #1]
	 *         7 \
	 *          1 \
	 *       [Device #2]
	 *         3 | In HopID x -> Out HopID 8
	 *           |
	 *           v In HopID 8 -> Out HopID x
	 * ..................... Domain border
	 *           |
	 *        [Host 2]
	 */
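	/*
	 * "x" above denotes a HopID that is not fixed by the caller but
	 * is picked from the port's HopID pool when the path is set up.
	 */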
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	nhi = &host->ports[7];
	port = &dev2->ports[3];
	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
	/* RX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
	/* TX path */
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_match(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);

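	/*
	 * -1 passed to tb_tunnel_match_dma() acts as a wildcard that
	 * matches any HopID or ring in that position.
	 */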
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

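	/*
	 * Without bonding the lane adapter has only half the credits
	 * available, so the device-side hop expects 16 below instead of
	 * the 32 seen in the bonded variant of this test.
	 */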
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, true);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_without_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_without_dp(test, host, 0x1, true);

	/*
	 * The device has no DP adapters, therefore baMinDPmain = baMinDPaux = 0.
	 *
	 * Create a PCIe path with fewer buffers than baMaxPCIe.
	 *
	 * For a device with the following buffer configuration:
	 *   baMaxUSB3 = 109
	 *   baMinDPaux = 0
	 *   baMinDPmain = 0
	 *   baMaxPCIe = 30
	 *   baMaxHI = 1
	 * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
	 * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
	 *              = Max(6, Min(30, 9)) = 9
	 */
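	/*
	 * The upstream path terminates on the host, whose own pool
	 * allows baMaxPCIe = 64, which is why its last hop below
	 * expects 64 credits rather than 9.
	 */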
	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	/* PCIe downstream path */
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);

	/* PCIe upstream path */
	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	in = &host->ports[5];
	out = &dev->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);

	/* Video (main) path */
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	/* AUX TX */
	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	/* AUX RX */
	path = tunnel->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	down = &host->ports[12];
	up = &dev->ports[16];
	tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dma(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	nhi = &host->ports[7];
	port = &dev->ports[3];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	/* DMA RX */
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	/* DMA TX */
	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tb_tunnel_free(tunnel);
}

static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
{
	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
	struct tb_switch *host, *dev;
	struct tb_port *nhi, *port;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	nhi = &host->ports[7];
	port = &dev->ports[3];

	/*
	 * Create three DMA tunnels through the same ports. With the
	 * default buffers we should be able to create two; the third
	 * one fails.
	 *
	 * For the default host we have the following buffers for DMA:
	 *
	 *   120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
	 *
	 * For the device we have the following:
	 *
	 *   120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
	 *
	 * spare = 14 + 1 = 15
	 *
	 * So on the host the first tunnel gets 14, the second gets the
	 * remaining 1, and then we run out of buffers.
	 */
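	/*
	 * The smaller host-side pool runs out first, so allocating a
	 * third tunnel below is expected to fail until one of the
	 * earlier tunnels is freed.
	 */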
	tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);

	path = tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);

	path = tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_NULL(test, tunnel3);

	/*
	 * Release the first DMA tunnel. That should make 14 buffers
	 * available for the next tunnel.
	 */
	tb_tunnel_free(tunnel1);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_NOT_NULL(test, tunnel3);

	path = tunnel3->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel3->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
}

static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *pcie_tunnel;
	struct tb_path *path;

	down = &host->ports[8];
	up = &dev->ports[9];
	pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
	KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);

	path = pcie_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = pcie_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	return pcie_tunnel;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel1;
	struct tb_path *path;

	in = &host->ports[5];
	out = &dev->ports[13];
	dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
	KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);

	path = dp_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel1->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel1;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel2;
	struct tb_path *path;

	in = &host->ports[6];
	out = &dev->ports[14];
	dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
	KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);

	path = dp_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel2->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel2;
}

static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *usb3_tunnel;
	struct tb_path *path;

	down = &host->ports[12];
	up = &dev->ports[16];
	usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
	KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);

	path = usb3_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = usb3_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	return usb3_tunnel;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel1;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
	KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);

	path = dma_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = dma_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	return dma_tunnel1;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel2;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
	KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);

	path = dma_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dma_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dma_tunnel2;
}

static void tb_test_credit_alloc_all(struct kunit *test)
{
	struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
	struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
	struct tb_switch *host, *dev;

	/*
	 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
	 * device. Expectation is that all these can be established with
	 * the default credit allocation found in Intel hardware.
	 */

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
	dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
	dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
	usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
	dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
	dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);

	tb_tunnel_free(dma_tunnel2);
	tb_tunnel_free(dma_tunnel1);
	tb_tunnel_free(usb3_tunnel);
	tb_tunnel_free(dp_tunnel2);
	tb_tunnel_free(dp_tunnel1);
	tb_tunnel_free(pcie_tunnel);
}

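/*
 * Each directory entry in the block below takes four dwords: an 8-byte
 * key, a dword whose top byte is the type character ('v' immediate
 * value, 't' text leaf, 'D' directory) with the length in the low bits,
 * and a dword holding either the immediate value or an offset to the
 * leaf data, as the per-dword annotations spell out.
 */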
static const u32 root_directory[] = {
	0x55584401,	/* "UXD" v1 */
	0x00000018,	/* Root directory length */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x76000001,	/* "v" R 1 */
	0x00000a27,	/* Immediate value, ! Vendor ID */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x74000003,	/* "t" R 3 */
	0x0000001a,	/* Text leaf offset, (“Apple Inc.”) */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x76000001,	/* "v" R 1 */
	0x0000000a,	/* Immediate value, ! Device ID */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x74000003,	/* "t" R 3 */
	0x0000001d,	/* Text leaf offset, (“Macintosh”) */
	0x64657669,	/* "devi" */
	0x63657276,	/* "cerv" */
	0x76000001,	/* "v" R 1 */
	0x80000100,	/* Immediate value, Device Revision */
	0x6e657477,	/* "netw" */
	0x6f726b00,	/* "ork" */
	0x44000014,	/* "D" R 20 */
	0x00000021,	/* Directory data offset, (Network Directory) */
	0x4170706c,	/* "Appl" */
	0x6520496e,	/* "e In" */
	0x632e0000,	/* "c." ! */
	0x4d616369,	/* "Maci" */
	0x6e746f73,	/* "ntos" */
	0x68000000,	/* "h" */
	0x00000000,	/* padding */
	0xca8961c6,	/* Directory UUID, Network Directory */
	0x9541ce1c,	/* Directory UUID, Network Directory */
	0x5949b8bd,	/* Directory UUID, Network Directory */
	0x4f5a5f2e,	/* Directory UUID, Network Directory */
	0x70727463,	/* "prtc" */
	0x69640000,	/* "id" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol ID */
	0x70727463,	/* "prtc" */
	0x76657273,	/* "vers" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Version */
	0x70727463,	/* "prtc" */
	0x72657673,	/* "revs" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Revision */
	0x70727463,	/* "prtc" */
	0x73746e73,	/* "stns" */
	0x76000001,	/* "v" R 1 */
	0x00000000,	/* Immediate value, Network Protocol Settings */
};

static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
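/*
 * This matches the four "Directory UUID" dwords above: the block stores
 * them as little-endian u32s, so 0xca8961c6 carries the bytes c6 61 89
 * ca that UUID_INIT() writes out as 0xc66189ca.
 */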

static void tb_test_property_parse(struct kunit *test)
{
	struct tb_property_dir *dir, *network_dir;
	struct tb_property *p;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, dir);

	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NULL(test, p);

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);

	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_NULL(test, p);

	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_NOT_NULL(test, p);

	network_dir = p->value.dir;
	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));

	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);

	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_EXPECT_TRUE(test, !p);
	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_EXPECT_TRUE(test, !p);

	tb_property_free_dir(dir);
}

static void tb_test_property_format(struct kunit *test)
{
	struct tb_property_dir *dir;
	ssize_t block_len;
	u32 *block;
	int ret, i;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, dir);

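	/*
	 * Formatting into a NULL block returns the number of dwords the
	 * directory needs, which should match the original block length.
	 */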
	ret = tb_property_format_dir(dir, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block_len = ret;

	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, block);

	ret = tb_property_format_dir(dir, block, block_len);
	KUNIT_EXPECT_EQ(test, ret, 0);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dir);
}

static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
			 struct tb_property_dir *d2)
{
	struct tb_property *p1, *p2, *tmp;
	int n1, n2, i;

	if (d1->uuid) {
		KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
	} else {
		KUNIT_ASSERT_NULL(test, d2->uuid);
	}

	n1 = 0;
	tb_property_for_each(d1, tmp)
		n1++;
	KUNIT_ASSERT_NE(test, n1, 0);

	n2 = 0;
	tb_property_for_each(d2, tmp)
		n2++;
	KUNIT_ASSERT_NE(test, n2, 0);

	KUNIT_ASSERT_EQ(test, n1, n2);

	p1 = NULL;
	p2 = NULL;
	for (i = 0; i < n1; i++) {
		p1 = tb_property_get_next(d1, p1);
		KUNIT_ASSERT_NOT_NULL(test, p1);
		p2 = tb_property_get_next(d2, p2);
		KUNIT_ASSERT_NOT_NULL(test, p2);

		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
		KUNIT_ASSERT_EQ(test, p1->length, p2->length);

		switch (p1->type) {
		case TB_PROPERTY_TYPE_DIRECTORY:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
			compare_dirs(test, p1->value.dir, p2->value.dir);
			break;

		case TB_PROPERTY_TYPE_DATA:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
			KUNIT_ASSERT_TRUE(test,
				!memcmp(p1->value.data, p2->value.data,
					p1->length * 4));
			break;

		case TB_PROPERTY_TYPE_TEXT:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
			break;

		case TB_PROPERTY_TYPE_VALUE:
			KUNIT_ASSERT_EQ(test, p1->value.immediate,
					p2->value.immediate);
			break;
		default:
			KUNIT_FAIL(test, "unexpected property type");
			break;
		}
	}
}

static void tb_test_property_copy(struct kunit *test)
{
	struct tb_property_dir *src, *dst;
	u32 *block;
	int ret, i;

	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, src);

	dst = tb_property_copy_dir(src);
	KUNIT_ASSERT_NOT_NULL(test, dst);

	/* Compare the structures */
	compare_dirs(test, src, dst);

	/* Compare the resulting property block */
	ret = tb_property_format_dir(dst, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, block);

	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
	KUNIT_EXPECT_TRUE(test, !ret);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dst);
	tb_property_free_dir(src);
}

static struct kunit_case tb_test_cases[] = {
	KUNIT_CASE(tb_test_path_basic),
	KUNIT_CASE(tb_test_path_not_connected_walk),
	KUNIT_CASE(tb_test_path_single_hop_walk),
	KUNIT_CASE(tb_test_path_daisy_chain_walk),
	KUNIT_CASE(tb_test_path_simple_tree_walk),
	KUNIT_CASE(tb_test_path_complex_tree_walk),
	KUNIT_CASE(tb_test_path_max_length_walk),
	KUNIT_CASE(tb_test_path_not_connected),
	KUNIT_CASE(tb_test_path_not_bonded_lane0),
	KUNIT_CASE(tb_test_path_not_bonded_lane1),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
	KUNIT_CASE(tb_test_path_mixed_chain),
	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
	KUNIT_CASE(tb_test_tunnel_pcie),
	KUNIT_CASE(tb_test_tunnel_dp),
	KUNIT_CASE(tb_test_tunnel_dp_chain),
	KUNIT_CASE(tb_test_tunnel_dp_tree),
	KUNIT_CASE(tb_test_tunnel_dp_max_length),
	KUNIT_CASE(tb_test_tunnel_3dp),
	KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	KUNIT_CASE(tb_test_tunnel_dma),
	KUNIT_CASE(tb_test_tunnel_dma_rx),
	KUNIT_CASE(tb_test_tunnel_dma_tx),
	KUNIT_CASE(tb_test_tunnel_dma_chain),
	KUNIT_CASE(tb_test_tunnel_dma_match),
	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
	KUNIT_CASE(tb_test_credit_alloc_pcie),
	KUNIT_CASE(tb_test_credit_alloc_without_dp),
	KUNIT_CASE(tb_test_credit_alloc_dp),
	KUNIT_CASE(tb_test_credit_alloc_usb3),
	KUNIT_CASE(tb_test_credit_alloc_dma),
	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
	KUNIT_CASE(tb_test_credit_alloc_all),
	KUNIT_CASE(tb_test_property_parse),
	KUNIT_CASE(tb_test_property_format),
	KUNIT_CASE(tb_test_property_copy),
	{ }
};

static struct kunit_suite tb_test_suite = {
	.name = "thunderbolt",
	.test_cases = tb_test_cases,
};

kunit_test_suite(tb_test_suite);
677 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
678 struct tb_port *src_port, *dst_port, *p;
679 int i;
680
681 host = alloc_host(test);
682 dev1 = alloc_dev_default(test, host, 0x1, true);
683 dev2 = alloc_dev_default(test, dev1, 0x301, true);
684 dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
685 alloc_dev_default(test, dev1, 0x501, true);
686 dev5 = alloc_dev_default(test, dev1, 0x701, true);
687 dev6 = alloc_dev_default(test, dev5, 0x70701, true);
688 dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
689 alloc_dev_default(test, dev7, 0x303070701, true);
690 dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
691
692 src_port = &dev3->ports[13];
693 dst_port = &dev9->ports[14];
694
695 /* Walk both directions */
696
697 i = 0;
698 tb_for_each_port_on_path(src_port, dst_port, p) {
699 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
700 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
701 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
702 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
703 test_data[i].type);
704 i++;
705 }
706
707 KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
708
709 i = ARRAY_SIZE(test_data) - 1;
710 tb_for_each_port_on_path(dst_port, src_port, p) {
711 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
712 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
713 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
714 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
715 test_data[i].type);
716 i--;
717 }
718
719 KUNIT_EXPECT_EQ(test, i, -1);
720}
721
722static void tb_test_path_max_length_walk(struct kunit *test)
723{
724 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
725 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
726 struct tb_port *src_port, *dst_port, *p;
727 int i;
728
729 /*
730 * Walks from Device #6 DP IN to Device #12 DP OUT.
731 *
732 * [Host]
733 * 1 / \ 3
734 * 1 / \ 1
735 * [Device #1] [Device #7]
736 * 3 | | 3
737 * 1 | | 1
738 * [Device #2] [Device #8]
739 * 3 | | 3
740 * 1 | | 1
741 * [Device #3] [Device #9]
742 * 3 | | 3
743 * 1 | | 1
744 * [Device #4] [Device #10]
745 * 3 | | 3
746 * 1 | | 1
747 * [Device #5] [Device #11]
748 * 3 | | 3
749 * 1 | | 1
750 * [Device #6] [Device #12]
751 */
752 static const struct port_expectation test_data[] = {
753 { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
754 { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
755 { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
756 { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
757 { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
758 { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
759 { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
760 { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
761 { .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
762 { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
763 { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
764 { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
765 { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
766 { .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
767 { .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
768 { .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
769 { .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
770 { .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
771 { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
772 { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
773 { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
774 { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
775 { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
776 { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
777 { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
778 { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
779 };
780
781 host = alloc_host(test);
782 dev1 = alloc_dev_default(test, host, 0x1, true);
783 dev2 = alloc_dev_default(test, dev1, 0x301, true);
784 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
785 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
786 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
787 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
788 dev7 = alloc_dev_default(test, host, 0x3, true);
789 dev8 = alloc_dev_default(test, dev7, 0x303, true);
790 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
791 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
792 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
793 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
794
795 src_port = &dev6->ports[13];
796 dst_port = &dev12->ports[13];
797
798 /* Walk both directions */
799
800 i = 0;
801 tb_for_each_port_on_path(src_port, dst_port, p) {
802 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
803 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
804 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
805 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
806 test_data[i].type);
807 i++;
808 }
809
810 KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
811
812 i = ARRAY_SIZE(test_data) - 1;
813 tb_for_each_port_on_path(dst_port, src_port, p) {
814 KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
815 KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
816 KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
817 KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
818 test_data[i].type);
819 i--;
820 }
821
822 KUNIT_EXPECT_EQ(test, i, -1);
823}
824
825static void tb_test_path_not_connected(struct kunit *test)
826{
827 struct tb_switch *host, *dev1, *dev2;
828 struct tb_port *down, *up;
829 struct tb_path *path;
830
831 host = alloc_host(test);
832 dev1 = alloc_dev_default(test, host, 0x3, false);
833 /* Not connected to anything */
834 dev2 = alloc_dev_default(test, NULL, 0x303, false);
835
836 down = &dev1->ports[10];
837 up = &dev2->ports[9];
838
839 path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
840 KUNIT_ASSERT_NULL(test, path);
841 path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
842 KUNIT_ASSERT_NULL(test, path);
843}
844
845struct hop_expectation {
846 u64 route;
847 u8 in_port;
848 enum tb_port_type in_type;
849 u8 out_port;
850 enum tb_port_type out_type;
851};
852
853static void tb_test_path_not_bonded_lane0(struct kunit *test)
854{
855 /*
856 * PCIe path from host to device using lane 0.
857 *
858 * [Host]
859 * 3 |: 4
860 * 1 |: 2
861 * [Device]
862 */
863 static const struct hop_expectation test_data[] = {
864 {
865 .route = 0x0,
866 .in_port = 9,
867 .in_type = TB_TYPE_PCIE_DOWN,
868 .out_port = 3,
869 .out_type = TB_TYPE_PORT,
870 },
871 {
872 .route = 0x3,
873 .in_port = 1,
874 .in_type = TB_TYPE_PORT,
875 .out_port = 9,
876 .out_type = TB_TYPE_PCIE_UP,
877 },
878 };
879 struct tb_switch *host, *dev;
880 struct tb_port *down, *up;
881 struct tb_path *path;
882 int i;
883
884 host = alloc_host(test);
885 dev = alloc_dev_default(test, host, 0x3, false);
886
887 down = &host->ports[9];
888 up = &dev->ports[9];
889
890 path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
891 KUNIT_ASSERT_NOT_NULL(test, path);
892 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
893 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
894 const struct tb_port *in_port, *out_port;
895
896 in_port = path->hops[i].in_port;
897 out_port = path->hops[i].out_port;
898
899 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
900 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
901 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
902 test_data[i].in_type);
903 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
904 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
905 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
906 test_data[i].out_type);
907 }
908 tb_path_free(path);
909}
910
911static void tb_test_path_not_bonded_lane1(struct kunit *test)
912{
913 /*
914 * DP Video path from host to device using lane 1. Paths like
915 * these are only used with Thunderbolt 1 devices where lane
916 * bonding is not possible. USB4 specifically does not allow
917 * paths like this (either lane 0 is used with lane 1 disabled,
918 * or both lanes are bonded).
919 *
920 * [Host]
921 * 1 :| 2
922 * 1 :| 2
923 * [Device]
924 */
925 static const struct hop_expectation test_data[] = {
926 {
927 .route = 0x0,
928 .in_port = 5,
929 .in_type = TB_TYPE_DP_HDMI_IN,
930 .out_port = 2,
931 .out_type = TB_TYPE_PORT,
932 },
933 {
934 .route = 0x1,
935 .in_port = 2,
936 .in_type = TB_TYPE_PORT,
937 .out_port = 13,
938 .out_type = TB_TYPE_DP_HDMI_OUT,
939 },
940 };
941 struct tb_switch *host, *dev;
942 struct tb_port *in, *out;
943 struct tb_path *path;
944 int i;
945
946 host = alloc_host(test);
947 dev = alloc_dev_default(test, host, 0x1, false);
948
949 in = &host->ports[5];
950 out = &dev->ports[13];
951
952 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
953 KUNIT_ASSERT_NOT_NULL(test, path);
954 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
955 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
956 const struct tb_port *in_port, *out_port;
957
958 in_port = path->hops[i].in_port;
959 out_port = path->hops[i].out_port;
960
961 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
962 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
963 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
964 test_data[i].in_type);
965 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
966 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
967 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
968 test_data[i].out_type);
969 }
970 tb_path_free(path);
971}
972
973static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
974{
975 /*
976 * DP Video path from host to device 3 using lane 1.
977 *
978 * [Host]
979 * 1 :| 2
980 * 1 :| 2
981 * [Device #1]
982 * 7 :| 8
983 * 1 :| 2
984 * [Device #2]
985 * 5 :| 6
986 * 1 :| 2
987 * [Device #3]
988 */
989 static const struct hop_expectation test_data[] = {
990 {
991 .route = 0x0,
992 .in_port = 5,
993 .in_type = TB_TYPE_DP_HDMI_IN,
994 .out_port = 2,
995 .out_type = TB_TYPE_PORT,
996 },
997 {
998 .route = 0x1,
999 .in_port = 2,
1000 .in_type = TB_TYPE_PORT,
1001 .out_port = 8,
1002 .out_type = TB_TYPE_PORT,
1003 },
1004 {
1005 .route = 0x701,
1006 .in_port = 2,
1007 .in_type = TB_TYPE_PORT,
1008 .out_port = 6,
1009 .out_type = TB_TYPE_PORT,
1010 },
1011 {
1012 .route = 0x50701,
1013 .in_port = 2,
1014 .in_type = TB_TYPE_PORT,
1015 .out_port = 13,
1016 .out_type = TB_TYPE_DP_HDMI_OUT,
1017 },
1018 };
1019 struct tb_switch *host, *dev1, *dev2, *dev3;
1020 struct tb_port *in, *out;
1021 struct tb_path *path;
1022 int i;
1023
1024 host = alloc_host(test);
1025 dev1 = alloc_dev_default(test, host, 0x1, false);
1026 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1027 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1028
1029 in = &host->ports[5];
1030 out = &dev3->ports[13];
1031
1032 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1033 KUNIT_ASSERT_NOT_NULL(test, path);
1034 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1035 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1036 const struct tb_port *in_port, *out_port;
1037
1038 in_port = path->hops[i].in_port;
1039 out_port = path->hops[i].out_port;
1040
1041 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1042 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1043 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1044 test_data[i].in_type);
1045 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1046 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1047 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1048 test_data[i].out_type);
1049 }
1050 tb_path_free(path);
1051}
1052
1053static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
1054{
1055 /*
1056 * DP Video path from device 3 to host using lane 1.
1057 *
1058 * [Host]
1059 * 1 :| 2
1060 * 1 :| 2
1061 * [Device #1]
1062 * 7 :| 8
1063 * 1 :| 2
1064 * [Device #2]
1065 * 5 :| 6
1066 * 1 :| 2
1067 * [Device #3]
1068 */
1069 static const struct hop_expectation test_data[] = {
1070 {
1071 .route = 0x50701,
1072 .in_port = 13,
1073 .in_type = TB_TYPE_DP_HDMI_IN,
1074 .out_port = 2,
1075 .out_type = TB_TYPE_PORT,
1076 },
1077 {
1078 .route = 0x701,
1079 .in_port = 6,
1080 .in_type = TB_TYPE_PORT,
1081 .out_port = 2,
1082 .out_type = TB_TYPE_PORT,
1083 },
1084 {
1085 .route = 0x1,
1086 .in_port = 8,
1087 .in_type = TB_TYPE_PORT,
1088 .out_port = 2,
1089 .out_type = TB_TYPE_PORT,
1090 },
1091 {
1092 .route = 0x0,
1093 .in_port = 2,
1094 .in_type = TB_TYPE_PORT,
1095 .out_port = 5,
1096 .out_type = TB_TYPE_DP_HDMI_IN,
1097 },
1098 };
1099 struct tb_switch *host, *dev1, *dev2, *dev3;
1100 struct tb_port *in, *out;
1101 struct tb_path *path;
1102 int i;
1103
1104 host = alloc_host(test);
1105 dev1 = alloc_dev_default(test, host, 0x1, false);
1106 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1107 dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
1108
1109 in = &dev3->ports[13];
1110 out = &host->ports[5];
1111
1112 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1113 KUNIT_ASSERT_NOT_NULL(test, path);
1114 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1115 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1116 const struct tb_port *in_port, *out_port;
1117
1118 in_port = path->hops[i].in_port;
1119 out_port = path->hops[i].out_port;
1120
1121 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1122 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1123 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1124 test_data[i].in_type);
1125 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1126 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1127 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1128 test_data[i].out_type);
1129 }
1130 tb_path_free(path);
1131}
1132
1133static void tb_test_path_mixed_chain(struct kunit *test)
1134{
1135 /*
1136 * DP Video path from host to device 4 where the first and last
1137 * links are bonded.
1138 *
1139 * [Host]
1140 * 1 |
1141 * 1 |
1142 * [Device #1]
1143 * 7 :| 8
1144 * 1 :| 2
1145 * [Device #2]
1146 * 5 :| 6
1147 * 1 :| 2
1148 * [Device #3]
1149 * 3 |
1150 * 1 |
1151 * [Device #4]
1152 */
1153 static const struct hop_expectation test_data[] = {
1154 {
1155 .route = 0x0,
1156 .in_port = 5,
1157 .in_type = TB_TYPE_DP_HDMI_IN,
1158 .out_port = 1,
1159 .out_type = TB_TYPE_PORT,
1160 },
1161 {
1162 .route = 0x1,
1163 .in_port = 1,
1164 .in_type = TB_TYPE_PORT,
1165 .out_port = 8,
1166 .out_type = TB_TYPE_PORT,
1167 },
1168 {
1169 .route = 0x701,
1170 .in_port = 2,
1171 .in_type = TB_TYPE_PORT,
1172 .out_port = 6,
1173 .out_type = TB_TYPE_PORT,
1174 },
1175 {
1176 .route = 0x50701,
1177 .in_port = 2,
1178 .in_type = TB_TYPE_PORT,
1179 .out_port = 3,
1180 .out_type = TB_TYPE_PORT,
1181 },
1182 {
1183 .route = 0x3050701,
1184 .in_port = 1,
1185 .in_type = TB_TYPE_PORT,
1186 .out_port = 13,
1187 .out_type = TB_TYPE_DP_HDMI_OUT,
1188 },
1189 };
1190 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1191 struct tb_port *in, *out;
1192 struct tb_path *path;
1193 int i;
1194
1195 host = alloc_host(test);
1196 dev1 = alloc_dev_default(test, host, 0x1, true);
1197 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1198 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1199 dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1200
1201 in = &host->ports[5];
1202 out = &dev4->ports[13];
1203
1204 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1205 KUNIT_ASSERT_NOT_NULL(test, path);
1206 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1207 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1208 const struct tb_port *in_port, *out_port;
1209
1210 in_port = path->hops[i].in_port;
1211 out_port = path->hops[i].out_port;
1212
1213 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1214 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1215 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1216 test_data[i].in_type);
1217 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1218 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1219 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1220 test_data[i].out_type);
1221 }
1222 tb_path_free(path);
1223}
1224
1225static void tb_test_path_mixed_chain_reverse(struct kunit *test)
1226{
1227 /*
1228 * DP Video path from device 4 to host where the first and last
1229 * links are bonded.
1230 *
1231 * [Host]
1232 * 1 |
1233 * 1 |
1234 * [Device #1]
1235 * 7 :| 8
1236 * 1 :| 2
1237 * [Device #2]
1238 * 5 :| 6
1239 * 1 :| 2
1240 * [Device #3]
1241 * 3 |
1242 * 1 |
1243 * [Device #4]
1244 */
1245 static const struct hop_expectation test_data[] = {
1246 {
1247 .route = 0x3050701,
1248 .in_port = 13,
1249 .in_type = TB_TYPE_DP_HDMI_OUT,
1250 .out_port = 1,
1251 .out_type = TB_TYPE_PORT,
1252 },
1253 {
1254 .route = 0x50701,
1255 .in_port = 3,
1256 .in_type = TB_TYPE_PORT,
1257 .out_port = 2,
1258 .out_type = TB_TYPE_PORT,
1259 },
1260 {
1261 .route = 0x701,
1262 .in_port = 6,
1263 .in_type = TB_TYPE_PORT,
1264 .out_port = 2,
1265 .out_type = TB_TYPE_PORT,
1266 },
1267 {
1268 .route = 0x1,
1269 .in_port = 8,
1270 .in_type = TB_TYPE_PORT,
1271 .out_port = 1,
1272 .out_type = TB_TYPE_PORT,
1273 },
1274 {
1275 .route = 0x0,
1276 .in_port = 1,
1277 .in_type = TB_TYPE_PORT,
1278 .out_port = 5,
1279 .out_type = TB_TYPE_DP_HDMI_IN,
1280 },
1281 };
1282 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1283 struct tb_port *in, *out;
1284 struct tb_path *path;
1285 int i;
1286
1287 host = alloc_host(test);
1288 dev1 = alloc_dev_default(test, host, 0x1, true);
1289 dev2 = alloc_dev_default(test, dev1, 0x701, false);
1290 dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1291 dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1292
1293 in = &dev4->ports[13];
1294 out = &host->ports[5];
1295
1296 path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1297 KUNIT_ASSERT_NOT_NULL(test, path);
1298 KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
1299 for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1300 const struct tb_port *in_port, *out_port;
1301
1302 in_port = path->hops[i].in_port;
1303 out_port = path->hops[i].out_port;
1304
1305 KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1306 KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1307 KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1308 test_data[i].in_type);
1309 KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1310 KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1311 KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1312 test_data[i].out_type);
1313 }
1314 tb_path_free(path);
1315}
1316
1317static void tb_test_tunnel_pcie(struct kunit *test)
1318{
1319 struct tb_switch *host, *dev1, *dev2;
1320 struct tb_tunnel *tunnel1, *tunnel2;
1321 struct tb_port *down, *up;
1322
1323 /*
1324 * Create PCIe tunnels between the host and two devices.
1325 *
1326 * [Host]
1327 * 1 |
1328 * 1 |
1329 * [Device #1]
1330 * 5 |
1331 * 1 |
1332 * [Device #2]
1333 */
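/*
 * A PCIe tunnel consists of two unidirectional paths; as the
 * expectations below spell out, paths[0] carries the downstream
 * direction (down adapter towards up adapter) and paths[1] the
 * upstream direction back.
 */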
1334 host = alloc_host(test);
1335 dev1 = alloc_dev_default(test, host, 0x1, true);
1336 dev2 = alloc_dev_default(test, dev1, 0x501, true);
1337
1338 down = &host->ports[8];
1339 up = &dev1->ports[9];
1340 tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
1341 KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1342 KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
1343 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1344 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1345 KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1346 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1347 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1348 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1349 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1350 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1351 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1352
1353 down = &dev1->ports[10];
1354 up = &dev2->ports[9];
1355 tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
1356 KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1357 KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
1358 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1359 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1360 KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1361 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1362 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1363 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1364 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1365 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1366 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1367
1368 tb_tunnel_free(tunnel2);
1369 tb_tunnel_free(tunnel1);
1370}
1371
1372static void tb_test_tunnel_dp(struct kunit *test)
1373{
1374 struct tb_switch *host, *dev;
1375 struct tb_port *in, *out;
1376 struct tb_tunnel *tunnel;
1377
1378 /*
1379 * Create DP tunnel between Host and Device
1380 *
1381 * [Host]
1382 * 1 |
1383 * 1 |
1384 * [Device]
1385 */
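/*
 * A DP tunnel is expected to carry three paths: the video (main)
 * path and the AUX TX path from DP IN to DP OUT, plus the AUX RX
 * path in the opposite direction (compare the path labels in
 * tb_test_credit_alloc_dp() below).
 */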
1386 host = alloc_host(test);
1387 dev = alloc_dev_default(test, host, 0x3, true);
1388
1389 in = &host->ports[5];
1390 out = &dev->ports[13];
1391
1392 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1393 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1394 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1395 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1396 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1397 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1398 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
1399 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1400 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
1401 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
1402 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1403 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
1404 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
1405 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1406 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
1407 tb_tunnel_free(tunnel);
1408}
1409
1410static void tb_test_tunnel_dp_chain(struct kunit *test)
1411{
1412 struct tb_switch *host, *dev1, *dev4;
1413 struct tb_port *in, *out;
1414 struct tb_tunnel *tunnel;
1415
1416 /*
1417 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
1418 *
1419 * [Host]
1420 * 1 |
1421 * 1 |
1422 * [Device #1]
1423 * 3 / | 5 \ 7
1424 * 1 / | \ 1
1425 * [Device #2] | [Device #4]
1426 * | 1
1427 * [Device #3]
1428 */
1429 host = alloc_host(test);
1430 dev1 = alloc_dev_default(test, host, 0x1, true);
1431 alloc_dev_default(test, dev1, 0x301, true);
1432 alloc_dev_default(test, dev1, 0x501, true);
1433 dev4 = alloc_dev_default(test, dev1, 0x701, true);
1434
1435 in = &host->ports[5];
1436 out = &dev4->ports[14];
1437
1438 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1439 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1440 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1441 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1442 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1443 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1444 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1445 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1446 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
1447 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1448 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1449 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
1450 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
1451 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1452 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
1453 tb_tunnel_free(tunnel);
1454}
1455
1456static void tb_test_tunnel_dp_tree(struct kunit *test)
1457{
1458 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
1459 struct tb_port *in, *out;
1460 struct tb_tunnel *tunnel;
1461
1462 /*
1463 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
1464 *
1465 * [Host]
1466 * 3 |
1467 * 1 |
1468 * [Device #1]
1469 * 3 / | 5 \ 7
1470 * 1 / | \ 1
1471 * [Device #2] | [Device #4]
1472 * | 1
1473 * [Device #3]
1474 * | 5
1475 * | 1
1476 * [Device #5]
1477 */
1478 host = alloc_host(test);
1479 dev1 = alloc_dev_default(test, host, 0x3, true);
1480 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1481 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1482 alloc_dev_default(test, dev1, 0x703, true);
1483 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1484
1485 in = &dev2->ports[13];
1486 out = &dev5->ports[13];
1487
1488 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1489 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1490 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1491 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1492 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1493 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1494 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
1495 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1496 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
1497 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
1498 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1499 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
1500 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
1501 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1502 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
1503 tb_tunnel_free(tunnel);
1504}
1505
1506static void tb_test_tunnel_dp_max_length(struct kunit *test)
1507{
1508 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
1509 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
1510 struct tb_port *in, *out;
1511 struct tb_tunnel *tunnel;
1512
1513 /*
1514 * Create DP tunnel from Device #6 to Device #12.
1515 *
1516 * [Host]
1517 * 1 / \ 3
1518 * 1 / \ 1
1519 * [Device #1] [Device #7]
1520 * 3 | | 3
1521 * 1 | | 1
1522 * [Device #2] [Device #8]
1523 * 3 | | 3
1524 * 1 | | 1
1525 * [Device #3] [Device #9]
1526 * 3 | | 3
1527 * 1 | | 1
1528 * [Device #4] [Device #10]
1529 * 3 | | 3
1530 * 1 | | 1
1531 * [Device #5] [Device #11]
1532 * 3 | | 3
1533 * 1 | | 1
1534 * [Device #6] [Device #12]
1535 */
1536 host = alloc_host(test);
1537 dev1 = alloc_dev_default(test, host, 0x1, true);
1538 dev2 = alloc_dev_default(test, dev1, 0x301, true);
1539 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
1540 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
1541 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
1542 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
1543 dev7 = alloc_dev_default(test, host, 0x3, true);
1544 dev8 = alloc_dev_default(test, dev7, 0x303, true);
1545 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
1546 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
1547 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
1548 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
1549
1550 in = &dev6->ports[13];
1551 out = &dev12->ports[13];
1552
1553 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1554 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1555 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
1556 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1557 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1558 KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
1559 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
1560 /* First hop */
1561 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1562 /* Middle */
1563 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
1564 &host->ports[1]);
1565 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
1566 &host->ports[3]);
1567 /* Last */
1568 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
1569 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
1570 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1571 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
1572 &host->ports[1]);
1573 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
1574 &host->ports[3]);
1575 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
1576 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
1577 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1578 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
1579 &host->ports[3]);
1580 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
1581 &host->ports[1]);
1582 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
1583 tb_tunnel_free(tunnel);
1584}
1585
1586static void tb_test_tunnel_usb3(struct kunit *test)
1587{
1588 struct tb_switch *host, *dev1, *dev2;
1589 struct tb_tunnel *tunnel1, *tunnel2;
1590 struct tb_port *down, *up;
1591
1592 /*
1593 * Create USB3 tunnels between the host and two devices.
1594 *
1595 * [Host]
1596 * 1 |
1597 * 1 |
1598 * [Device #1]
1599 * \ 7
1600 * \ 1
1601 * [Device #2]
1602 */
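/*
 * Note (an assumption about the API, not asserted by this test):
 * the two trailing zeros passed to tb_tunnel_alloc_usb3() are taken
 * to be the max_up/max_down bandwidth limits, so these tunnels are
 * created without a bandwidth restriction.
 */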
1603 host = alloc_host(test);
1604 dev1 = alloc_dev_default(test, host, 0x1, true);
1605 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1606
1607 down = &host->ports[12];
1608 up = &dev1->ports[16];
1609 tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1610 KUNIT_ASSERT_NOT_NULL(test, tunnel1);
1611 KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
1612 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1613 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1614 KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
1615 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1616 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1617 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1618 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1619 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1620 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1621
1622 down = &dev1->ports[17];
1623 up = &dev2->ports[16];
1624 tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1625 KUNIT_ASSERT_NOT_NULL(test, tunnel2);
1626 KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
1627 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1628 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1629 KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
1630 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1631 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1632 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1633 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1634 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1635 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1636
1637 tb_tunnel_free(tunnel2);
1638 tb_tunnel_free(tunnel1);
1639}
1640
1641static void tb_test_tunnel_port_on_path(struct kunit *test)
1642{
1643 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1644 struct tb_port *in, *out, *port;
1645 struct tb_tunnel *dp_tunnel;
1646
1647 /*
1648 * [Host]
1649 * 3 |
1650 * 1 |
1651 * [Device #1]
1652 * 3 / | 5 \ 7
1653 * 1 / | \ 1
1654 * [Device #2] | [Device #4]
1655 * | 1
1656 * [Device #3]
1657 * | 5
1658 * | 1
1659 * [Device #5]
1660 */
1661 host = alloc_host(test);
1662 dev1 = alloc_dev_default(test, host, 0x3, true);
1663 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1664 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1665 dev4 = alloc_dev_default(test, dev1, 0x703, true);
1666 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1667
1668 in = &dev2->ports[13];
1669 out = &dev5->ports[13];
1670
1671 dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
1672 KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
1673
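/*
 * Adapters and lane ports on the route the DP tunnel traverses
 * (Device #2 DP IN up through Device #1 and down to Device #5 DP
 * OUT) should report true below; ports on unrelated branches such
 * as the host or Device #4 should not.
 */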
1674 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1675 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
1676
1677 port = &host->ports[8];
1678 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1679
1680 port = &host->ports[3];
1681 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1682
1683 port = &dev1->ports[1];
1684 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1685
1686 port = &dev1->ports[3];
1687 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1688
1689 port = &dev1->ports[5];
1690 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1691
1692 port = &dev1->ports[7];
1693 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1694
1695 port = &dev3->ports[1];
1696 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1697
1698 port = &dev5->ports[1];
1699 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1700
1701 port = &dev4->ports[1];
1702 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1703
1704 tb_tunnel_free(dp_tunnel);
1705}
1706
1707static void tb_test_tunnel_dma(struct kunit *test)
1708{
1709 struct tb_port *nhi, *port;
1710 struct tb_tunnel *tunnel;
1711 struct tb_switch *host;
1712
1713 /*
1714 * Create DMA tunnel from NHI to port 1 and back.
1715 *
1716 * [Host 1]
1717 * 1 ^ In HopID 1 -> Out HopID 8
1718 * |
1719 * v In HopID 8 -> Out HopID 1
1720 * ............ Domain border
1721 * |
1722 * [Host 2]
1723 */
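/*
 * Assuming tb_tunnel_alloc_dma() takes (tb, nhi, dst, transmit_path,
 * transmit_ring, receive_path, receive_ring), the (8, 1, 8, 1) below
 * maps to the HopIDs of the diagram above: ring/HopID 1 on the NHI
 * end and HopID 8 on the lane adapter (port 1) end, in both
 * directions; the RX/TX expectations below check exactly that.
 */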
1724 host = alloc_host(test);
1725 nhi = &host->ports[7];
1726 port = &host->ports[1];
1727
1728 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1729 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1730 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1731 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1732 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1733 KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1734 /* RX path */
1735 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1736 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1737 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1738 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1739 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
1740 /* TX path */
1741 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
1742 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1743 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1744 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
1745 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
1746
1747 tb_tunnel_free(tunnel);
1748}
1749
1750static void tb_test_tunnel_dma_rx(struct kunit *test)
1751{
1752 struct tb_port *nhi, *port;
1753 struct tb_tunnel *tunnel;
1754 struct tb_switch *host;
1755
1756 /*
1757 * Create DMA RX tunnel from port 1 to NHI.
1758 *
1759 * [Host 1]
1760 * 1 ^
1761 * |
1762 * | In HopID 15 -> Out HopID 2
1763 * ............ Domain border
1764 * |
1765 * [Host 2]
1766 */
1767 host = alloc_host(test);
1768 nhi = &host->ports[7];
1769 port = &host->ports[1];
1770
1771 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
1772 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1773 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1774 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1775 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1776 KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1777 /* RX path */
1778 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1779 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1780 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
1781 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1782 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
1783
1784 tb_tunnel_free(tunnel);
1785}
1786
1787static void tb_test_tunnel_dma_tx(struct kunit *test)
1788{
1789 struct tb_port *nhi, *port;
1790 struct tb_tunnel *tunnel;
1791 struct tb_switch *host;
1792
1793 /*
1794 * Create DMA TX tunnel from NHI to port 1.
1795 *
1796 * [Host 1]
1797 * 1 | In HopID 2 -> Out HopID 15
1798 * |
1799 * v
1800 * ............ Domain border
1801 * |
1802 * [Host 2]
1803 */
1804 host = alloc_host(test);
1805 nhi = &host->ports[7];
1806 port = &host->ports[1];
1807
1808 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
1809 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1810 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1811 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1812 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1813 KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
1814 /* TX path */
1815 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1816 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
1817 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
1818 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
1819 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
1820
1821 tb_tunnel_free(tunnel);
1822}
1823
1824static void tb_test_tunnel_dma_chain(struct kunit *test)
1825{
1826 struct tb_switch *host, *dev1, *dev2;
1827 struct tb_port *nhi, *port;
1828 struct tb_tunnel *tunnel;
1829
1830 /*
1831 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1832 *
1833 * [Host 1]
1834 * 1 ^ In HopID 1 -> Out HopID x
1835 * |
1836 * 1 | In HopID x -> Out HopID 1
1837 * [Device #1]
1838 * 7 \
1839 * 1 \
1840 * [Device #2]
1841 * 3 | In HopID x -> Out HopID 8
1842 * |
1843 * v In HopID 8 -> Out HopID x
1844 * ............ Domain border
1845 * |
1846 * [Host 2]
1847 */
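/*
 * The "HopID x" entries in the diagram stand for HopIDs that are
 * allocated dynamically on the intermediate links when the paths are
 * created, unlike the NHI and domain border HopIDs which the caller
 * fixes to 1 and 8.
 */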
1848 host = alloc_host(test);
1849 dev1 = alloc_dev_default(test, host, 0x1, true);
1850 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1851
1852 nhi = &host->ports[7];
1853 port = &dev2->ports[3];
1854 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1855 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1856 KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
1857 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1858 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1859 KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
1860 /* RX path */
1861 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1862 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1863 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1864 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1865 &dev2->ports[1]);
1866 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1867 &dev1->ports[7]);
1868 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1869 &dev1->ports[1]);
1870 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1871 &host->ports[1]);
1872 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
1873 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
1874 /* TX path */
1875 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1876 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1877 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1878 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1879 &dev1->ports[1]);
1880 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1881 &dev1->ports[7]);
1882 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1883 &dev2->ports[1]);
1884 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1885 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1886
1887 tb_tunnel_free(tunnel);
1888}
1889
1890static void tb_test_tunnel_dma_match(struct kunit *test)
1891{
1892 struct tb_port *nhi, *port;
1893 struct tb_tunnel *tunnel;
1894 struct tb_switch *host;
1895
1896 host = alloc_host(test);
1897 nhi = &host->ports[7];
1898 port = &host->ports[1];
1899
1900 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
1901 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1902
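/*
 * tb_tunnel_match_dma() treats -1 as a wildcard: every call below
 * that merely replaces fields of the allocated (15, 1, 15, 1) tuple
 * with -1 must still match, while any differing concrete value must
 * not.
 */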
1903 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1904 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
1905 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1906 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1907 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1908 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1909 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1910 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
1911 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1912 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
1913
1914 tb_tunnel_free(tunnel);
1915
1916 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
1917 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1918 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1919 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1920 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1921 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1922 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1923 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1924 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1925
1926 tb_tunnel_free(tunnel);
1927
1928 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
1929 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1930 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
1931 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1932 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
1933 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1934 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1935 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
1936 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1937
1938 tb_tunnel_free(tunnel);
1939}
1940
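/*
 * The credit allocation tests below check two per-hop quantities:
 * nfc_credits (non-flow-controlled buffers, which the DP video path
 * relies on) and initial_credits (the initial flow-control credit
 * allocation used by the other path types). The expected numbers
 * follow from the buffer configuration of the switches built by the
 * alloc_*() helpers.
 */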
1941static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
1942{
1943 struct tb_switch *host, *dev;
1944 struct tb_port *up, *down;
1945 struct tb_tunnel *tunnel;
1946 struct tb_path *path;
1947
1948 host = alloc_host(test);
1949 dev = alloc_dev_default(test, host, 0x1, false);
1950
1951 down = &host->ports[8];
1952 up = &dev->ports[9];
1953 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1954 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1955 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1956
1957 path = tunnel->paths[0];
1958 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1959 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1960 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1961 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1962 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1963
1964 path = tunnel->paths[1];
1965 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1966 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1967 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1968 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1969 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
1970
1971 tb_tunnel_free(tunnel);
1972}
1973
1974static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
1975{
1976 struct tb_switch *host, *dev;
1977 struct tb_port *up, *down;
1978 struct tb_tunnel *tunnel;
1979 struct tb_path *path;
1980
1981 host = alloc_host(test);
1982 dev = alloc_dev_default(test, host, 0x1, true);
1983
1984 down = &host->ports[8];
1985 up = &dev->ports[9];
1986 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
1987 KUNIT_ASSERT_NOT_NULL(test, tunnel);
1988 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1989
1990 path = tunnel->paths[0];
1991 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1992 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
1993 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
1994 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
1995 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
1996
1997 path = tunnel->paths[1];
1998 KUNIT_ASSERT_EQ(test, path->path_length, 2);
1999 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2000 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2001 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2002 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2003
2004 tb_tunnel_free(tunnel);
2005}
2006
2007static void tb_test_credit_alloc_pcie(struct kunit *test)
2008{
2009 struct tb_switch *host, *dev;
2010 struct tb_port *up, *down;
2011 struct tb_tunnel *tunnel;
2012 struct tb_path *path;
2013
2014 host = alloc_host_usb4(test);
2015 dev = alloc_dev_usb4(test, host, 0x1, true);
2016
2017 down = &host->ports[8];
2018 up = &dev->ports[9];
2019 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2020 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2021 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2022
2023 path = tunnel->paths[0];
2024 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2025 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2026 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2027 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2028 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2029
2030 path = tunnel->paths[1];
2031 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2032 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2033 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2034 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2035 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2036
2037 tb_tunnel_free(tunnel);
2038}
2039
2040static void tb_test_credit_alloc_without_dp(struct kunit *test)
2041{
2042 struct tb_switch *host, *dev;
2043 struct tb_port *up, *down;
2044 struct tb_tunnel *tunnel;
2045 struct tb_path *path;
2046
2047 host = alloc_host_usb4(test);
2048 dev = alloc_dev_without_dp(test, host, 0x1, true);
2049
2050 /*
2051 * The device has no DP, therefore baMinDPmain = baMinDPaux = 0.
2052 *
2053 * Create a PCIe path with fewer buffers than baMaxPCIe.
2054 *
2055 * For a device with the following buffer configuration:
2056 * baMaxUSB3 = 109
2057 * baMinDPaux = 0
2058 * baMinDPmain = 0
2059 * baMaxPCIe = 30
2060 * baMaxHI = 1
2061 * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
2062 * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
2063 * = Max(6, Min(30, 118 - 109)) = Max(6, Min(30, 9)) = 9
2064 */
2065 down = &host->ports[8];
2066 up = &dev->ports[9];
2067 tunnel = tb_tunnel_alloc_pci(NULL, up, down);
2068 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2069 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2070
2071 /* PCIe downstream path */
2072 path = tunnel->paths[0];
2073 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2074 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2075 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2076 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2077 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
2078
2079 /* PCIe upstream path */
2080 path = tunnel->paths[1];
2081 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2082 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2083 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2084 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2085 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
2086
2087 tb_tunnel_free(tunnel);
2088}
2089
2090static void tb_test_credit_alloc_dp(struct kunit *test)
2091{
2092 struct tb_switch *host, *dev;
2093 struct tb_port *in, *out;
2094 struct tb_tunnel *tunnel;
2095 struct tb_path *path;
2096
2097 host = alloc_host_usb4(test);
2098 dev = alloc_dev_usb4(test, host, 0x1, true);
2099
2100 in = &host->ports[5];
2101 out = &dev->ports[14];
2102
2103 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
2104 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2105 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
2106
2107 /* Video (main) path */
2108 path = tunnel->paths[0];
2109 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2110 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
2111 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2112 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
2113 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
2114
2115 /* AUX TX */
2116 path = tunnel->paths[1];
2117 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2118 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2119 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2120 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2121 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2122
2123 /* AUX RX */
2124 path = tunnel->paths[2];
2125 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2126 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2127 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
2128 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2129 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
2130
2131 tb_tunnel_free(tunnel);
2132}
2133
2134static void tb_test_credit_alloc_usb3(struct kunit *test)
2135{
2136 struct tb_switch *host, *dev;
2137 struct tb_port *up, *down;
2138 struct tb_tunnel *tunnel;
2139 struct tb_path *path;
2140
2141 host = alloc_host_usb4(test);
2142 dev = alloc_dev_usb4(test, host, 0x1, true);
2143
2144 down = &host->ports[12];
2145 up = &dev->ports[16];
2146 tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
2147 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2148 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2149
2150 path = tunnel->paths[0];
2151 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2152 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2153 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2154 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2155 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2156
2157 path = tunnel->paths[1];
2158 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2159 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2160 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
2161 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2162 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
2163
2164 tb_tunnel_free(tunnel);
2165}
2166
2167static void tb_test_credit_alloc_dma(struct kunit *test)
2168{
2169 struct tb_switch *host, *dev;
2170 struct tb_port *nhi, *port;
2171 struct tb_tunnel *tunnel;
2172 struct tb_path *path;
2173
2174 host = alloc_host_usb4(test);
2175 dev = alloc_dev_usb4(test, host, 0x1, true);
2176
2177 nhi = &host->ports[7];
2178 port = &dev->ports[3];
2179
2180 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
2181 KUNIT_ASSERT_NOT_NULL(test, tunnel);
2182 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
2183
2184 /* DMA RX */
2185 path = tunnel->paths[0];
2186 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2187 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2188 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
2189 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2190 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2191
2192 /* DMA TX */
2193 path = tunnel->paths[1];
2194 KUNIT_ASSERT_EQ(test, path->path_length, 2);
2195 KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
2196 KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
2197 KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
2198 KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
2199
2200 tb_tunnel_free(tunnel);
2201}
2202
2203static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
2204{
2205 struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
2206 struct tb_switch *host, *dev;
2207 struct tb_port *nhi, *port;
2208 struct tb_path *path;
2209
2210 host = alloc_host_usb4(test);
2211 dev = alloc_dev_usb4(test, host, 0x1, true);
2212
2213 nhi = &host->ports[7];
2214 port = &dev->ports[3];
2215
2216 /*
2217 * Create three DMA tunnels through the same ports. With the
2218 * default buffers we should be able to create two; the third
2219 * one fails.
2220 *
2221 * For the default host we have the following buffers for DMA:
2222 *
2223 * 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
2224 *
2225 * For the device we have the following:
2226 *
2227 * 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
2228 *
2229 * spare = 14 + 1 = 15
2230 *
2231 * So on the host the first tunnel gets 14 buffers and the second
2232 * gets the remaining 1, and then we run out of buffers.
2233 */
	tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);

	path = tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);

	path = tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_NULL(test, tunnel3);

	/*
	 * Release the first DMA tunnel. That should make 14 buffers
	 * available for the next tunnel.
	 */
	tb_tunnel_free(tunnel1);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_NOT_NULL(test, tunnel3);

	path = tunnel3->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel3->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
}

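/*
 * The uppercase helpers below each establish one tunnel of the given
 * type and verify its per-hop credit allocation; they exist so that
 * tb_test_credit_alloc_all() can keep all of the tunnels alive at the
 * same time.
 */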
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *pcie_tunnel;
	struct tb_path *path;

	down = &host->ports[8];
	up = &dev->ports[9];
	pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
	KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);

	path = pcie_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = pcie_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	return pcie_tunnel;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel1;
	struct tb_path *path;

	in = &host->ports[5];
	out = &dev->ports[13];
	dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
	KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);

	path = dp_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel1->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel1;
}

static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel2;
	struct tb_path *path;

	in = &host->ports[6];
	out = &dev->ports[14];
	dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
	KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);

	path = dp_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel2->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel2;
}

static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *usb3_tunnel;
	struct tb_path *path;

	down = &host->ports[12];
	up = &dev->ports[16];
	usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
	KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);

	path = usb3_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = usb3_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	return usb3_tunnel;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel1;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
	KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);

	path = dma_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = dma_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	return dma_tunnel1;
}

static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel2;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
	KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);

	path = dma_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dma_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dma_tunnel2;
}

static void tb_test_credit_alloc_all(struct kunit *test)
{
	struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
	struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
	struct tb_switch *host, *dev;

	/*
	 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from the host
	 * to the device. The expectation is that all of these can be
	 * established with the default credit allocation found in Intel
	 * hardware.
	 */

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
	dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
	dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
	usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
	dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
	dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);

	tb_tunnel_free(dma_tunnel2);
	tb_tunnel_free(dma_tunnel1);
	tb_tunnel_free(usb3_tunnel);
	tb_tunnel_free(dp_tunnel2);
	tb_tunnel_free(dp_tunnel1);
	tb_tunnel_free(pcie_tunnel);
}

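/*
 * Each property entry in the block below is four dwords: an
 * eight-character key (e.g. "vend" "orid"), a dword carrying the type
 * character and value length (e.g. "v" R 1), and either an immediate
 * value or an offset to the leaf/directory data.
 */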
static const u32 root_directory[] = {
	0x55584401,	/* "UXD" v1 */
	0x00000018,	/* Root directory length */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x76000001,	/* "v" R 1 */
	0x00000a27,	/* Immediate value, Vendor ID */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x74000003,	/* "t" R 3 */
	0x0000001a,	/* Text leaf offset (“Apple Inc.”) */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x76000001,	/* "v" R 1 */
	0x0000000a,	/* Immediate value, Device ID */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x74000003,	/* "t" R 3 */
	0x0000001d,	/* Text leaf offset (“Macintosh”) */
	0x64657669,	/* "devi" */
	0x63657276,	/* "cerv" */
	0x76000001,	/* "v" R 1 */
	0x80000100,	/* Immediate value, Device Revision */
	0x6e657477,	/* "netw" */
	0x6f726b00,	/* "ork" */
	0x44000014,	/* "D" R 20 */
	0x00000021,	/* Directory data offset (Network Directory) */
	0x4170706c,	/* "Appl" */
	0x6520496e,	/* "e In" */
	0x632e0000,	/* "c." */
	0x4d616369,	/* "Maci" */
	0x6e746f73,	/* "ntos" */
	0x68000000,	/* "h" */
	0x00000000,	/* padding */
	0xca8961c6,	/* Directory UUID, Network Directory */
	0x9541ce1c,	/* Directory UUID, Network Directory */
	0x5949b8bd,	/* Directory UUID, Network Directory */
	0x4f5a5f2e,	/* Directory UUID, Network Directory */
	0x70727463,	/* "prtc" */
	0x69640000,	/* "id" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol ID */
	0x70727463,	/* "prtc" */
	0x76657273,	/* "vers" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Version */
	0x70727463,	/* "prtc" */
	0x72657673,	/* "revs" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Revision */
	0x70727463,	/* "prtc" */
	0x73746e73,	/* "stns" */
	0x76000001,	/* "v" R 1 */
	0x00000000,	/* Immediate value, Network Protocol Settings */
};

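/*
 * The four "Directory UUID" dwords in root_directory above are the
 * little-endian on-wire encoding of this same UUID.
 */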
static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

static void tb_test_property_parse(struct kunit *test)
{
	struct tb_property_dir *dir, *network_dir;
	struct tb_property *p;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, dir);

	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NULL(test, p);

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);

	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_NULL(test, p);

	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_NOT_NULL(test, p);

	network_dir = p->value.dir;
	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));

	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);

	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_EXPECT_NULL(test, p);
	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_EXPECT_NULL(test, p);

	tb_property_free_dir(dir);
}

static void tb_test_property_format(struct kunit *test)
{
	struct tb_property_dir *dir;
	ssize_t block_len;
	u32 *block;
	int ret, i;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, dir);

	ret = tb_property_format_dir(dir, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block_len = ret;

	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, block);

	ret = tb_property_format_dir(dir, block, block_len);
	KUNIT_EXPECT_EQ(test, ret, 0);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dir);
}

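/*
 * Recursively check that two parsed property directories carry
 * identical contents.
 */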
static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
			 struct tb_property_dir *d2)
{
	struct tb_property *p1, *p2, *tmp;
	int n1, n2, i;

	if (d1->uuid) {
		KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
	} else {
		KUNIT_ASSERT_NULL(test, d2->uuid);
	}

	n1 = 0;
	tb_property_for_each(d1, tmp)
		n1++;
	KUNIT_ASSERT_NE(test, n1, 0);

	n2 = 0;
	tb_property_for_each(d2, tmp)
		n2++;
	KUNIT_ASSERT_NE(test, n2, 0);

	KUNIT_ASSERT_EQ(test, n1, n2);

	p1 = NULL;
	p2 = NULL;
	for (i = 0; i < n1; i++) {
		p1 = tb_property_get_next(d1, p1);
		KUNIT_ASSERT_NOT_NULL(test, p1);
		p2 = tb_property_get_next(d2, p2);
		KUNIT_ASSERT_NOT_NULL(test, p2);

		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
		KUNIT_ASSERT_EQ(test, p1->length, p2->length);

		switch (p1->type) {
		case TB_PROPERTY_TYPE_DIRECTORY:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
			compare_dirs(test, p1->value.dir, p2->value.dir);
			break;

		case TB_PROPERTY_TYPE_DATA:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
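			/* Property data lengths are in dwords */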
			KUNIT_ASSERT_TRUE(test,
				!memcmp(p1->value.data, p2->value.data,
					p1->length * 4));
			break;

		case TB_PROPERTY_TYPE_TEXT:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
			break;

		case TB_PROPERTY_TYPE_VALUE:
			KUNIT_ASSERT_EQ(test, p1->value.immediate,
					p2->value.immediate);
			break;

		default:
			KUNIT_FAIL(test, "unexpected property type");
			break;
		}
	}
}

static void tb_test_property_copy(struct kunit *test)
{
	struct tb_property_dir *src, *dst;
	u32 *block;
	int ret, i;

	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, src);

	dst = tb_property_copy_dir(src);
	KUNIT_ASSERT_NOT_NULL(test, dst);

	/* Compare the structures */
	compare_dirs(test, src, dst);

	/* Compare the resulting property block */
	ret = tb_property_format_dir(dst, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, block);

	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
	KUNIT_EXPECT_EQ(test, ret, 0);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dst);
	tb_property_free_dir(src);
}


static struct kunit_case tb_test_cases[] = {
	KUNIT_CASE(tb_test_path_basic),
	KUNIT_CASE(tb_test_path_not_connected_walk),
	KUNIT_CASE(tb_test_path_single_hop_walk),
	KUNIT_CASE(tb_test_path_daisy_chain_walk),
	KUNIT_CASE(tb_test_path_simple_tree_walk),
	KUNIT_CASE(tb_test_path_complex_tree_walk),
	KUNIT_CASE(tb_test_path_max_length_walk),
	KUNIT_CASE(tb_test_path_not_connected),
	KUNIT_CASE(tb_test_path_not_bonded_lane0),
	KUNIT_CASE(tb_test_path_not_bonded_lane1),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
	KUNIT_CASE(tb_test_path_mixed_chain),
	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
	KUNIT_CASE(tb_test_tunnel_pcie),
	KUNIT_CASE(tb_test_tunnel_dp),
	KUNIT_CASE(tb_test_tunnel_dp_chain),
	KUNIT_CASE(tb_test_tunnel_dp_tree),
	KUNIT_CASE(tb_test_tunnel_dp_max_length),
	KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	KUNIT_CASE(tb_test_tunnel_dma),
	KUNIT_CASE(tb_test_tunnel_dma_rx),
	KUNIT_CASE(tb_test_tunnel_dma_tx),
	KUNIT_CASE(tb_test_tunnel_dma_chain),
	KUNIT_CASE(tb_test_tunnel_dma_match),
	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
	KUNIT_CASE(tb_test_credit_alloc_pcie),
	KUNIT_CASE(tb_test_credit_alloc_without_dp),
	KUNIT_CASE(tb_test_credit_alloc_dp),
	KUNIT_CASE(tb_test_credit_alloc_usb3),
	KUNIT_CASE(tb_test_credit_alloc_dma),
	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
	KUNIT_CASE(tb_test_credit_alloc_all),
	KUNIT_CASE(tb_test_property_parse),
	KUNIT_CASE(tb_test_property_format),
	KUNIT_CASE(tb_test_property_copy),
	{ }
};

static struct kunit_suite tb_test_suite = {
	.name = "thunderbolt",
	.test_cases = tb_test_cases,
};

kunit_test_suite(tb_test_suite);
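
/*
 * A minimal sketch of how this suite can be run, assuming the driver's
 * KUnit Kconfig symbol (e.g. USB4_KUNIT_TEST) is enabled in the
 * .kunitconfig and a kernel tree whose kunit.py supports test filter
 * globs:
 *
 *   ./tools/testing/kunit/kunit.py run 'thunderbolt'
 */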