// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(sw->nvm);
	if (ret)
		return ret;

	ret = tb_nvm_write_headers(sw->nvm);
	if (ret)
		return ret;

	buf = sw->nvm->buf_data_start;
	image_size = sw->nvm->buf_data_size;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (ret)
		return ret;

	sw->nvm->flushed = true;
	return 0;
}

static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this if
		 * everything goes well so getting a timeout is expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from update auth operation requires power
		 * cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeouts for a while). Once
	 * we get a response the device needs to be power cycled in
	 * order for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME by
	 * themselves. To be on the safe side keep the root port in D0
	 * during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported we assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
	int ret;

	if (tb_switch_is_usb4(sw)) {
		if (auth_only) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
			if (ret)
				return ret;
		}
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	}
	if (auth_only)
		return -EOPNOTSUPP;

	sw->nvm->authenticating = true;
	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and a negative errno in
 * case of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
		       size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		ret = tb_nvm_add_active(nvm, nvm_read);
		if (ret)
			goto err_nvm;
		tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_sw_dbg(sw, "NVM upgrade disabled\n");
	sw->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8) port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
	const struct tb_regs_port_header *regs = &port->config;

	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	       regs->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
	       port->ctl_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		switch (state) {
		case TB_PORT_DISABLED:
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;

		case TB_PORT_UNPLUGGED:
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				break;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;

		case TB_PORT_UP:
		case TB_PORT_TX_CL0S:
		case TB_PORT_RX_CL0S:
		case TB_PORT_CL1:
		case TB_PORT_CL2:
			tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
			return 1;

		default:
			if (state < 0)
				return state;

			/*
			 * After plug-in the state is TB_PORT_CONNECTING. Give it some
			 * time.
			 */
			tb_port_dbg(port,
				    "is connected, link is not up (state: %d), retrying...\n",
				    state);
			msleep(100);
		}

	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}
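
/*
 * Example (a sketch, not code taken from elsewhere in the driver):
 * during switch resume a caller typically waits through the unplugged
 * state as well and only proceeds when the port reports link up:
 *
 *	ret = tb_wait_for_port(port, true);
 *	if (ret <= 0)
 *		return ret;
 */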

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only,
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	if (credits < 0)
		credits = max_t(int, -nfc_credits, credits);

	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}
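
/*
 * Example (sketch): lend two NFC credits to a lane adapter and return
 * them later by passing a negative amount:
 *
 *	ret = tb_port_add_nfc_credits(port, 2);
 *	...
 *	ret = tb_port_add_nfc_credits(port, -2);
 */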

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
	return 0;
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to enable them.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used for lane 0 and 1 adapters to disable them.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	INIT_LIST_HEAD(&port->list);

	/* Control adapter does not have configuration space */
	if (!port->port)
		return 0;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;

		/*
		 * For USB4 ports the buffers allocated for the control
		 * path can be read from the path config space. For
		 * legacy devices we use a hard-coded value.
		 */
		if (port->cap_usb4) {
			struct tb_regs_hop hop;

			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		}
		if (!port->ctl_credits)
			port->ctl_credits = 2;

	} else {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;

	tb_dump_port(port->sw->tb, port);
	return 0;
}

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
			       int max_hopid)
{
	int port_max_hopid;
	struct ida *ida;

	if (in) {
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
	} else {
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	}

	/*
	 * NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
	 * reserved.
	 */
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;

	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;

	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->in_hopids, hopid);
}

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
	ida_simple_remove(&port->out_hopids, hopid);
}
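
/*
 * Example (sketch): reserve an input HopID for a new path and release
 * it again at teardown. A negative @max_hopid means the port maximum
 * is used:
 *
 *	hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */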

static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
{
	u64 mask = (1ULL << parent->config.depth * 8) - 1;
	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other
 * end of that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
{
	struct tb_port *next;

	if (!prev)
		return start;

	if (prev->sw == end->sw) {
		if (prev == end)
			return NULL;
		return end;
	}

	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		if (prev->remote &&
		    (next == prev || next->dual_link_port == prev))
			next = prev->remote;
	} else {
		if (tb_is_upstream_port(prev)) {
			next = prev->remote;
		} else {
			next = tb_upstream_port(prev->sw);
			/*
			 * Keep the same link if prev and next are both
			 * dual link ports.
			 */
			if (next->dual_link_port &&
			    next->link_nr != prev->link_nr) {
				next = next->dual_link_port;
			}
		}
	}

	return next != prev ? next : NULL;
}
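
/*
 * Example (sketch; src and dst are hypothetical path endpoints): visit
 * every port on the path between them, both ends included:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)))
 *		tb_port_dbg(p, "on path\n");
 */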

/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port)
{
	u32 val, speed;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;

	switch (speed) {
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
		return 40;
	case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
		return 20;
	default:
		return 10;
	}
}

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns the link generation as a number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links, so for those it always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port)
{
	int ret;

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	switch (ret) {
	case 40:
		return 4;
	case 20:
		return 3;
	default:
		return 2;
	}
}

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Matches the values in enum tb_link_width */
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if the given @width is
 * supported by the hardware and returns %true if it is.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
	u32 phy, widths;
	int ret;

	if (!port->cap_phy)
		return false;

	if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
		if (tb_port_get_link_generation(port) < 4 ||
		    !usb4_port_asym_supported(port))
			return false;
	}

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	/*
	 * The field encoding is the same as &enum tb_link_width (which is
	 * passed to @width).
	 */
	widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
	return widths & width;
}

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
	switch (width) {
	case TB_LINK_WIDTH_SINGLE:
		/* Gen 4 link cannot be single */
		if (tb_port_get_link_generation(port) >= 4)
			return -EOPNOTSUPP;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_DUAL:
		if (tb_port_get_link_generation(port) >= 4)
			return usb4_port_asym_set_link_width(port, width);
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		break;

	case TB_LINK_WIDTH_ASYM_TX:
	case TB_LINK_WIDTH_ASYM_RX:
		return usb4_port_asym_set_link_width(port, width);

	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
	u32 val;
	int ret;

	if (!port->cap_phy)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (bonding)
		val |= LANE_ADP_CS_1_LB;
	else
		val &= ~LANE_ADP_CS_1_LB;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port)
{
	enum tb_link_width width;
	int ret;

	/*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	 */
	width = tb_port_get_link_width(port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	width = tb_port_get_link_width(port->dual_link_port);
	if (width == TB_LINK_WIDTH_SINGLE) {
		ret = tb_port_set_link_width(port->dual_link_port,
					     TB_LINK_WIDTH_DUAL);
		if (ret)
			goto err_lane0;
	}

	/*
	 * Only set bonding if the link was not already bonded. This
	 * prevents the lane adapter from re-entering the bonding state.
	 */
	if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
		ret = tb_port_set_lane_bonding(port, true);
		if (ret)
			goto err_lane1;
	}

	/*
	 * When lane 0 bonding is set it will affect lane 1 too so
	 * update both.
	 */
	port->bonded = true;
	port->dual_link_port->bonded = true;

	return 0;

err_lane1:
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);

	return ret;
}
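
/*
 * Example (sketch; "up" is a hypothetical upstream lane adapter): a
 * typical bonding sequence enables bonding, waits for the width change
 * and then re-reads the credits:
 *
 *	ret = tb_port_lane_bonding_enable(up);
 *	if (ret)
 *		return ret;
 *	ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
 *	if (ret)
 *		return ret;
 *	tb_port_update_credits(up);
 */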

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port)
{
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
	tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
	port->dual_link_port->bonded = false;
	port->bonded = false;
}

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the width was not reached
 * within the given timeout, %0 if it did. Can be passed a mask of
 * expected widths and succeeds if any of the widths is reached.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
				int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
	int ret;

	/* Gen 4 link does not support single lane */
	if ((width & TB_LINK_WIDTH_SINGLE) &&
	    tb_port_get_link_generation(port) >= 4)
		return -EOPNOTSUPP;

	do {
		ret = tb_port_get_link_width(port);
		if (ret < 0) {
			/*
			 * Sometimes we get port locked error when
			 * polling the lanes so we can ignore it and
			 * retry.
			 */
			if (ret != -EACCES)
				return ret;
		} else if (ret & width) {
			return 0;
		}

		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int tb_port_do_update_credits(struct tb_port *port)
{
	u32 nfc_credits;
	int ret;

	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (ret)
		return ret;

	if (nfc_credits != port->config.nfc_credits) {
		u32 total;

		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;

		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);

		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
	}

	return 0;
}

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Also updates the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_do_update_credits(port);
	if (ret)
		return ret;

	if (!port->dual_link_port)
		return 0;
	return tb_port_do_update_credits(port->dual_link_port);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
}

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port)
{
	bool has_remote = tb_port_has_remote(port);

	if (port->usb4) {
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		/*
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 *
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 *
		 * This is only needed for non-USB4 ports.
		 */
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	}

	return has_remote || port->xdomain;
}

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port)
{
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);

	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);

	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);

	default:
		return false;
	}
}

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
		return false;

	return !!(data & ADP_USB3_CS_0_PE);
}

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
			  : ADP_USB3_CS_0_V;

	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
}

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port)
{
	u32 data;

	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
		return false;

	return !!(data & ADP_PCIE_CS_0_PE);
}

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
	if (!port->cap_adap)
		return -ENXIO;
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
}

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HPD bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return !!(data & ADP_DP_CS_2_HPD);
}

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	if (ret)
		return ret;

	data |= ADP_DP_CS_3_HPDC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
}

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
{
	u32 data[2];
	int ret;

	if (tb_switch_is_usb4(port->sw))
		return 0;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
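
/*
 * Example (sketch; the HopID variables are hypothetical): program the
 * HopIDs first and only then enable the paths with tb_dp_port_enable():
 *
 *	ret = tb_dp_port_set_hops(port, video_hopid, aux_tx_hopid, aux_rx_hopid);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */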

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port)
{
	u32 data[2];

	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
			 ARRAY_SIZE(data)))
		return false;

	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
	u32 data[2];
	int ret;

	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	if (enable)
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
	else
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}

/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
	switch (sw->generation) {
	case 1:
		return "Thunderbolt 1";
	case 2:
		return "Thunderbolt 2";
	case 3:
		return "Thunderbolt 3";
	case 4:
		return "USB4";
	default:
		return "Unknown";
	}
}

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
	const struct tb_regs_switch_header *regs = &sw->config;

	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	tb_dbg(tb,
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
}

/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_switch_reset(struct tb_switch *sw)
{
	struct tb_cfg_result res;

	if (sw->generation > 1)
		return 0;

	tb_sw_dbg(sw, "resetting switch\n");

	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	if (res.err)
		return res.err;
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
	if (res.err > 0)
		return -EIO;
	return res.err;
}

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait until the specified bits in the specified offset reach the
 * specified value. Returns %0 in case of success, %-ETIMEDOUT if the
 * @value was not reached within the given timeout or a negative errno
 * in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
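
/*
 * Example (sketch; assumes the ROUTER_CS_6 register and its
 * ROUTER_CS_6_SLPR bit as defined in tb_regs.h): poll until the bit
 * clears, giving up after 100 ms:
 *
 *	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, 0, 100);
 */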

/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
	u32 data;
	int res;

	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
		return 0;

	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	if (res)
		return res;

	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
	if (res)
		return res;

	if (active) {
		data = data & 0xFFFFFF83;
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			break;
		default:
			/*
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			 */
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
		}
	} else {
		data = data | 0x7c;
	}
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
}

static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct tb_switch *sw = tb_to_switch(dev);

	return sysfs_emit(buf, "%u\n", sw->authorized);
}

static int disapprove_switch(struct device *dev, void *not_used)
{
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;

	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		int ret;

		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		if (ret)
			return ret;

		ret = tb_domain_disapprove_switch(sw->tb, sw);
		if (ret)
			return ret;

		sw->authorized = 0;
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	}

	return 0;
}

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
	char envp_string[13];
	int ret = -EINVAL;
	char *envp[] = { envp_string, NULL };

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	if (!!sw->authorized == !!val)
		goto unlock;

	switch (val) {
	/* Disapprove switch */
	case 0:
		if (tb_route(sw)) {
			ret = disapprove_switch(&sw->dev, NULL);
			goto unlock;
		}
		break;

	/* Approve switch */
	case 1:
		if (sw->key)
			ret = tb_domain_approve_switch_key(sw->tb, sw);
		else
			ret = tb_domain_approve_switch(sw->tb, sw);
		break;

	/* Challenge switch */
	case 2:
		if (sw->key)
			ret = tb_domain_challenge_switch_key(sw->tb, sw);
		break;

	default:
		break;
	}

	if (!ret) {
		sw->authorized = val;
		/*
		 * Notify userspace about the status change, i.e. the new
		 * value of /sys/bus/thunderbolt/devices/.../authorized.
1721 */
1722 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
1723 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
1724 }
1725
1726unlock:
1727 mutex_unlock(&sw->tb->lock);
1728 return ret;
1729}
1730
1731static ssize_t authorized_store(struct device *dev,
1732 struct device_attribute *attr,
1733 const char *buf, size_t count)
1734{
1735 struct tb_switch *sw = tb_to_switch(dev);
1736 unsigned int val;
1737 ssize_t ret;
1738
1739 ret = kstrtouint(buf, 0, &val);
1740 if (ret)
1741 return ret;
1742 if (val > 2)
1743 return -EINVAL;
1744
1745 pm_runtime_get_sync(&sw->dev);
1746 ret = tb_switch_set_authorized(sw, val);
1747 pm_runtime_mark_last_busy(&sw->dev);
1748 pm_runtime_put_autosuspend(&sw->dev);
1749
1750 return ret ? ret : count;
1751}
1752static DEVICE_ATTR_RW(authorized);
1753
1754static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1755 char *buf)
1756{
1757 struct tb_switch *sw = tb_to_switch(dev);
1758
1759 return sysfs_emit(buf, "%u\n", sw->boot);
1760}
1761static DEVICE_ATTR_RO(boot);
1762
1763static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1764 char *buf)
1765{
1766 struct tb_switch *sw = tb_to_switch(dev);
1767
1768 return sysfs_emit(buf, "%#x\n", sw->device);
1769}
1770static DEVICE_ATTR_RO(device);
1771
1772static ssize_t
1773device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1774{
1775 struct tb_switch *sw = tb_to_switch(dev);
1776
1777 return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
1778}
1779static DEVICE_ATTR_RO(device_name);
1780
1781static ssize_t
1782generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1783{
1784 struct tb_switch *sw = tb_to_switch(dev);
1785
1786 return sysfs_emit(buf, "%u\n", sw->generation);
1787}
1788static DEVICE_ATTR_RO(generation);
1789
1790static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1791 char *buf)
1792{
1793 struct tb_switch *sw = tb_to_switch(dev);
1794 ssize_t ret;
1795
1796 if (!mutex_trylock(&sw->tb->lock))
1797 return restart_syscall();
1798
1799 if (sw->key)
1800 ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1801 else
1802 ret = sysfs_emit(buf, "\n");
1803
1804 mutex_unlock(&sw->tb->lock);
1805 return ret;
1806}
1807
1808static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1809 const char *buf, size_t count)
1810{
1811 struct tb_switch *sw = tb_to_switch(dev);
1812 u8 key[TB_SWITCH_KEY_SIZE];
1813 ssize_t ret = count;
1814 bool clear = false;
1815
1816 if (!strcmp(buf, "\n"))
1817 clear = true;
1818 else if (hex2bin(key, buf, sizeof(key)))
1819 return -EINVAL;
1820
1821 if (!mutex_trylock(&sw->tb->lock))
1822 return restart_syscall();
1823
1824 if (sw->authorized) {
1825 ret = -EBUSY;
1826 } else {
1827 kfree(sw->key);
1828 if (clear) {
1829 sw->key = NULL;
1830 } else {
1831 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1832 if (!sw->key)
1833 ret = -ENOMEM;
1834 }
1835 }
1836
1837 mutex_unlock(&sw->tb->lock);
1838 return ret;
1839}
1840static DEVICE_ATTR(key, 0600, key_show, key_store);
1841
1842static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1843 char *buf)
1844{
1845 struct tb_switch *sw = tb_to_switch(dev);
1846
1847 return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
1848}
1849
1850/*
1851 * Currently all lanes must run at the same speed but we expose here
1852 * both directions to allow possible asymmetric links in the future.
1853 */
1854static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1855static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1856
1857static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
1858 char *buf)
1859{
1860 struct tb_switch *sw = tb_to_switch(dev);
1861 unsigned int width;
1862
1863 switch (sw->link_width) {
1864 case TB_LINK_WIDTH_SINGLE:
1865 case TB_LINK_WIDTH_ASYM_TX:
1866 width = 1;
1867 break;
1868 case TB_LINK_WIDTH_DUAL:
1869 width = 2;
1870 break;
1871 case TB_LINK_WIDTH_ASYM_RX:
1872 width = 3;
1873 break;
1874 default:
1875 WARN_ON_ONCE(1);
1876 return -EINVAL;
1877 }
1878
1879 return sysfs_emit(buf, "%u\n", width);
1880}
1881static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
1882
1883static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
1884 char *buf)
1885{
1886 struct tb_switch *sw = tb_to_switch(dev);
1887 unsigned int width;
1888
1889 switch (sw->link_width) {
1890 case TB_LINK_WIDTH_SINGLE:
1891 case TB_LINK_WIDTH_ASYM_RX:
1892 width = 1;
1893 break;
1894 case TB_LINK_WIDTH_DUAL:
1895 width = 2;
1896 break;
1897 case TB_LINK_WIDTH_ASYM_TX:
1898 width = 3;
1899 break;
1900 default:
1901 WARN_ON_ONCE(1);
1902 return -EINVAL;
1903 }
1904
1905 return sysfs_emit(buf, "%u\n", width);
1906}
1907static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
1908
1909static ssize_t nvm_authenticate_show(struct device *dev,
1910 struct device_attribute *attr, char *buf)
1911{
1912 struct tb_switch *sw = tb_to_switch(dev);
1913 u32 status;
1914
1915 nvm_get_auth_status(sw, &status);
1916 return sysfs_emit(buf, "%#x\n", status);
1917}
1918
1919static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1920 bool disconnect)
1921{
1922 struct tb_switch *sw = tb_to_switch(dev);
1923 int val, ret;
1924
1925 pm_runtime_get_sync(&sw->dev);
1926
1927 if (!mutex_trylock(&sw->tb->lock)) {
1928 ret = restart_syscall();
1929 goto exit_rpm;
1930 }
1931
1932 if (sw->no_nvm_upgrade) {
1933 ret = -EOPNOTSUPP;
1934 goto exit_unlock;
1935 }
1936
1937 /* If NVMem devices are not yet added */
1938 if (!sw->nvm) {
1939 ret = -EAGAIN;
1940 goto exit_unlock;
1941 }
1942
1943 ret = kstrtoint(buf, 10, &val);
1944 if (ret)
1945 goto exit_unlock;
1946
1947 /* Always clear the authentication status */
1948 nvm_clear_auth_status(sw);
1949
1950 if (val > 0) {
1951 if (val == AUTHENTICATE_ONLY) {
1952 if (disconnect)
1953 ret = -EINVAL;
1954 else
1955 ret = nvm_authenticate(sw, true);
1956 } else {
1957 if (!sw->nvm->flushed) {
1958 if (!sw->nvm->buf) {
1959 ret = -EINVAL;
1960 goto exit_unlock;
1961 }
1962
1963 ret = nvm_validate_and_write(sw);
1964 if (ret || val == WRITE_ONLY)
1965 goto exit_unlock;
1966 }
1967 if (val == WRITE_AND_AUTHENTICATE) {
1968 if (disconnect)
1969 ret = tb_lc_force_power(sw);
1970 else
1971 ret = nvm_authenticate(sw, false);
1972 }
1973 }
1974 }
1975
1976exit_unlock:
1977 mutex_unlock(&sw->tb->lock);
1978exit_rpm:
1979 pm_runtime_mark_last_busy(&sw->dev);
1980 pm_runtime_put_autosuspend(&sw->dev);
1981
1982 return ret;
1983}
1984
1985static ssize_t nvm_authenticate_store(struct device *dev,
1986 struct device_attribute *attr, const char *buf, size_t count)
1987{
1988 int ret = nvm_authenticate_sysfs(dev, buf, false);
1989 if (ret)
1990 return ret;
1991 return count;
1992}
1993static DEVICE_ATTR_RW(nvm_authenticate);
1994
1995static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1996 struct device_attribute *attr, char *buf)
1997{
1998 return nvm_authenticate_show(dev, attr, buf);
1999}
2000
2001static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
2002 struct device_attribute *attr, const char *buf, size_t count)
2003{
2004 int ret;
2005
2006 ret = nvm_authenticate_sysfs(dev, buf, true);
2007 return ret ? ret : count;
2008}
2009static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
2010
2011static ssize_t nvm_version_show(struct device *dev,
2012 struct device_attribute *attr, char *buf)
2013{
2014 struct tb_switch *sw = tb_to_switch(dev);
2015 int ret;
2016
2017 if (!mutex_trylock(&sw->tb->lock))
2018 return restart_syscall();
2019
2020 if (sw->safe_mode)
2021 ret = -ENODATA;
2022 else if (!sw->nvm)
2023 ret = -EAGAIN;
2024 else
2025 ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
2026
2027 mutex_unlock(&sw->tb->lock);
2028
2029 return ret;
2030}
2031static DEVICE_ATTR_RO(nvm_version);
2032
2033static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
2034 char *buf)
2035{
2036 struct tb_switch *sw = tb_to_switch(dev);
2037
2038 return sysfs_emit(buf, "%#x\n", sw->vendor);
2039}
2040static DEVICE_ATTR_RO(vendor);
2041
2042static ssize_t
2043vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
2044{
2045 struct tb_switch *sw = tb_to_switch(dev);
2046
2047 return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
2048}
2049static DEVICE_ATTR_RO(vendor_name);
2050
2051static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
2052 char *buf)
2053{
2054 struct tb_switch *sw = tb_to_switch(dev);
2055
2056 return sysfs_emit(buf, "%pUb\n", sw->uuid);
2057}
2058static DEVICE_ATTR_RO(unique_id);
2059
2060static struct attribute *switch_attrs[] = {
2061 &dev_attr_authorized.attr,
2062 &dev_attr_boot.attr,
2063 &dev_attr_device.attr,
2064 &dev_attr_device_name.attr,
2065 &dev_attr_generation.attr,
2066 &dev_attr_key.attr,
2067 &dev_attr_nvm_authenticate.attr,
2068 &dev_attr_nvm_authenticate_on_disconnect.attr,
2069 &dev_attr_nvm_version.attr,
2070 &dev_attr_rx_speed.attr,
2071 &dev_attr_rx_lanes.attr,
2072 &dev_attr_tx_speed.attr,
2073 &dev_attr_tx_lanes.attr,
2074 &dev_attr_vendor.attr,
2075 &dev_attr_vendor_name.attr,
2076 &dev_attr_unique_id.attr,
2077 NULL,
2078};
2079
2080static umode_t switch_attr_is_visible(struct kobject *kobj,
2081 struct attribute *attr, int n)
2082{
2083 struct device *dev = kobj_to_dev(kobj);
2084 struct tb_switch *sw = tb_to_switch(dev);
2085
2086 if (attr == &dev_attr_authorized.attr) {
2087 if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
2088 sw->tb->security_level == TB_SECURITY_DPONLY)
2089 return 0;
2090 } else if (attr == &dev_attr_device.attr) {
2091 if (!sw->device)
2092 return 0;
2093 } else if (attr == &dev_attr_device_name.attr) {
2094 if (!sw->device_name)
2095 return 0;
2096 } else if (attr == &dev_attr_vendor.attr) {
2097 if (!sw->vendor)
2098 return 0;
2099 } else if (attr == &dev_attr_vendor_name.attr) {
2100 if (!sw->vendor_name)
2101 return 0;
2102 } else if (attr == &dev_attr_key.attr) {
2103 if (tb_route(sw) &&
2104 sw->tb->security_level == TB_SECURITY_SECURE &&
2105 sw->security_level == TB_SECURITY_SECURE)
2106 return attr->mode;
2107 return 0;
2108 } else if (attr == &dev_attr_rx_speed.attr ||
2109 attr == &dev_attr_rx_lanes.attr ||
2110 attr == &dev_attr_tx_speed.attr ||
2111 attr == &dev_attr_tx_lanes.attr) {
2112 if (tb_route(sw))
2113 return attr->mode;
2114 return 0;
2115 } else if (attr == &dev_attr_nvm_authenticate.attr) {
2116 if (nvm_upgradeable(sw))
2117 return attr->mode;
2118 return 0;
2119 } else if (attr == &dev_attr_nvm_version.attr) {
2120 if (nvm_readable(sw))
2121 return attr->mode;
2122 return 0;
2123 } else if (attr == &dev_attr_boot.attr) {
2124 if (tb_route(sw))
2125 return attr->mode;
2126 return 0;
2127 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
2128 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
2129 return attr->mode;
2130 return 0;
2131 }
2132
2133 return sw->safe_mode ? 0 : attr->mode;
2134}
2135
2136static const struct attribute_group switch_group = {
2137 .is_visible = switch_attr_is_visible,
2138 .attrs = switch_attrs,
2139};
2140
2141static const struct attribute_group *switch_groups[] = {
2142 &switch_group,
2143 NULL,
2144};
2145
2146static void tb_switch_release(struct device *dev)
2147{
2148 struct tb_switch *sw = tb_to_switch(dev);
2149 struct tb_port *port;
2150
2151 dma_port_free(sw->dma_port);
2152
2153 tb_switch_for_each_port(sw, port) {
2154 ida_destroy(&port->in_hopids);
2155 ida_destroy(&port->out_hopids);
2156 }
2157
2158 kfree(sw->uuid);
2159 kfree(sw->device_name);
2160 kfree(sw->vendor_name);
2161 kfree(sw->ports);
2162 kfree(sw->drom);
2163 kfree(sw->key);
2164 kfree(sw);
2165}
2166
2167static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
2168{
2169 const struct tb_switch *sw = tb_to_switch(dev);
2170 const char *type;
2171
2172 if (tb_switch_is_usb4(sw)) {
2173 if (add_uevent_var(env, "USB4_VERSION=%u.0",
2174 usb4_switch_version(sw)))
2175 return -ENOMEM;
2176 }
2177
2178 if (!tb_route(sw)) {
2179 type = "host";
2180 } else {
2181 const struct tb_port *port;
2182 bool hub = false;
2183
2184 /* Device is a hub if it has any downstream ports */
2185 tb_switch_for_each_port(sw, port) {
2186 if (!port->disabled && !tb_is_upstream_port(port) &&
2187 tb_port_is_null(port)) {
2188 hub = true;
2189 break;
2190 }
2191 }
2192
2193 type = hub ? "hub" : "device";
2194 }
2195
2196 if (add_uevent_var(env, "USB4_TYPE=%s", type))
2197 return -ENOMEM;
2198 return 0;
2199}
2200
2201/*
2202 * Currently we only need to provide the callbacks. Everything else is
2203 * handled in the connection manager.
2204 */
2205static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
2206{
2207 struct tb_switch *sw = tb_to_switch(dev);
2208 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2209
2210 if (cm_ops->runtime_suspend_switch)
2211 return cm_ops->runtime_suspend_switch(sw);
2212
2213 return 0;
2214}
2215
2216static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
2217{
2218 struct tb_switch *sw = tb_to_switch(dev);
2219 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
2220
2221 if (cm_ops->runtime_resume_switch)
2222 return cm_ops->runtime_resume_switch(sw);
2223 return 0;
2224}
2225
2226static const struct dev_pm_ops tb_switch_pm_ops = {
2227 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
2228 NULL)
2229};
2230
2231struct device_type tb_switch_type = {
2232 .name = "thunderbolt_device",
2233 .release = tb_switch_release,
2234 .uevent = tb_switch_uevent,
2235 .pm = &tb_switch_pm_ops,
2236};
2237
2238static int tb_switch_get_generation(struct tb_switch *sw)
2239{
2240 if (tb_switch_is_usb4(sw))
2241 return 4;
2242
2243 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
2244 switch (sw->config.device_id) {
2245 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
2246 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
2247 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
2248 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
2249 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
2250 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
2251 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
2252 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
2253 return 1;
2254
2255 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
2256 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
2257 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
2258 return 2;
2259
2260 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
2261 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
2262 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
2263 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
2264 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
2265 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
2266 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
2267 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
2268 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
2269 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
2270 return 3;
2271 }
2272 }
2273
2274 /*
2275 * For unknown switches assume generation 1 to be on the safe
2276 * side.
2277 */
2278 tb_sw_warn(sw, "unsupported switch device id %#x\n",
2279 sw->config.device_id);
2280 return 1;
2281}
2282
2283static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
2284{
2285 int max_depth;
2286
2287 if (tb_switch_is_usb4(sw) ||
2288 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
2289 max_depth = USB4_SWITCH_MAX_DEPTH;
2290 else
2291 max_depth = TB_SWITCH_MAX_DEPTH;
2292
2293 return depth > max_depth;
2294}
2295
2296/**
2297 * tb_switch_alloc() - allocate a switch
2298 * @tb: Pointer to the owning domain
2299 * @parent: Parent device for this switch
2300 * @route: Route string for this switch
2301 *
2302 * Allocates and initializes a switch. Will not upload configuration to
2303 * the switch. For that you need to call tb_switch_configure()
2304 * separately. The returned switch should be released by calling
2305 * tb_switch_put().
2306 *
2307 * Return: Pointer to the allocated switch or ERR_PTR() in case of
2308 * failure.
2309 */
2310struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
2311 u64 route)
2312{
2313 struct tb_switch *sw;
2314 int upstream_port;
2315 int i, ret, depth;
2316
2317 /* Unlock the downstream port so we can access the switch below */
2318 if (route) {
2319 struct tb_switch *parent_sw = tb_to_switch(parent);
2320 struct tb_port *down;
2321
2322 down = tb_port_at(route, parent_sw);
2323 tb_port_unlock(down);
2324 }
2325
2326 depth = tb_route_length(route);
2327
2328 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
2329 if (upstream_port < 0)
2330 return ERR_PTR(upstream_port);
2331
2332 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2333 if (!sw)
2334 return ERR_PTR(-ENOMEM);
2335
2336 sw->tb = tb;
2337 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
2338 if (ret)
2339 goto err_free_sw_ports;
2340
2341 sw->generation = tb_switch_get_generation(sw);
2342
2343 tb_dbg(tb, "current switch config:\n");
2344 tb_dump_switch(tb, sw);
2345
2346 /* configure switch */
2347 sw->config.upstream_port_number = upstream_port;
2348 sw->config.depth = depth;
2349 sw->config.route_hi = upper_32_bits(route);
2350 sw->config.route_lo = lower_32_bits(route);
2351 sw->config.enabled = 0;
2352
2353 /* Make sure we do not exceed maximum topology limit */
2354 if (tb_switch_exceeds_max_depth(sw, depth)) {
2355 ret = -EADDRNOTAVAIL;
2356 goto err_free_sw_ports;
2357 }
2358
2359 /* initialize ports */
2360 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
2361 GFP_KERNEL);
2362 if (!sw->ports) {
2363 ret = -ENOMEM;
2364 goto err_free_sw_ports;
2365 }
2366
2367 for (i = 0; i <= sw->config.max_port_number; i++) {
2368 /* minimum setup for tb_find_cap and tb_drom_read to work */
2369 sw->ports[i].sw = sw;
2370 sw->ports[i].port = i;
2371
2372 /* Control port does not need HopID allocation */
2373 if (i) {
2374 ida_init(&sw->ports[i].in_hopids);
2375 ida_init(&sw->ports[i].out_hopids);
2376 }
2377 }
2378
2379 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
2380 if (ret > 0)
2381 sw->cap_plug_events = ret;
2382
2383 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
2384 if (ret > 0)
2385 sw->cap_vsec_tmu = ret;
2386
2387 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
2388 if (ret > 0)
2389 sw->cap_lc = ret;
2390
2391 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
2392 if (ret > 0)
2393 sw->cap_lp = ret;
2394
2395 /* Root switch is always authorized */
2396 if (!route)
2397 sw->authorized = true;
2398
2399 device_initialize(&sw->dev);
2400 sw->dev.parent = parent;
2401 sw->dev.bus = &tb_bus_type;
2402 sw->dev.type = &tb_switch_type;
2403 sw->dev.groups = switch_groups;
2404 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2405
2406 return sw;
2407
2408err_free_sw_ports:
2409 kfree(sw->ports);
2410 kfree(sw);
2411
2412 return ERR_PTR(ret);
2413}
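
/*
 * Illustrative sketch (not part of the driver): the typical lifecycle a
 * connection manager follows with the helpers above and below. The
 * wrapper function and its error handling are assumptions made for the
 * example; tb_switch_put() is the release call named in the kernel-doc.
 */
static int __maybe_unused example_switch_lifecycle(struct tb *tb,
						   struct device *parent,
						   u64 route)
{
	struct tb_switch *sw;
	int ret;

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	/* Upload config space, then expose the switch to userspace */
	ret = tb_switch_configure(sw);
	if (!ret)
		ret = tb_switch_add(sw);
	if (ret)
		tb_switch_put(sw);
	return ret;
}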
2414
2415/**
2416 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
2417 * @tb: Pointer to the owning domain
2418 * @parent: Parent device for this switch
2419 * @route: Route string for this switch
2420 *
2421 * This creates a switch in safe mode. This means the switch pretty much
2422 * lacks all capabilities except the DMA configuration port until it is
2423 * flashed with valid NVM firmware.
2424 *
2425 * The returned switch must be released by calling tb_switch_put().
2426 *
2427 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
2428 */
2429struct tb_switch *
2430tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
2431{
2432 struct tb_switch *sw;
2433
2434 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
2435 if (!sw)
2436 return ERR_PTR(-ENOMEM);
2437
2438 sw->tb = tb;
2439 sw->config.depth = tb_route_length(route);
2440 sw->config.route_hi = upper_32_bits(route);
2441 sw->config.route_lo = lower_32_bits(route);
2442 sw->safe_mode = true;
2443
2444 device_initialize(&sw->dev);
2445 sw->dev.parent = parent;
2446 sw->dev.bus = &tb_bus_type;
2447 sw->dev.type = &tb_switch_type;
2448 sw->dev.groups = switch_groups;
2449 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
2450
2451 return sw;
2452}
2453
2454/**
2455 * tb_switch_configure() - Uploads configuration to the switch
2456 * @sw: Switch to configure
2457 *
2458 * Call this function before the switch is added to the system. It
2459 * uploads the configuration to the switch and makes it available for
2460 * the connection manager to use. Can be called again after resume from
2461 * low power states to re-initialize the switch.
2462 *
2463 * Return: %0 in case of success and negative errno in case of failure
2464 */
2465int tb_switch_configure(struct tb_switch *sw)
2466{
2467 struct tb *tb = sw->tb;
2468 u64 route;
2469 int ret;
2470
2471 route = tb_route(sw);
2472
2473 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
2474 sw->config.enabled ? "restoring" : "initializing", route,
2475 tb_route_length(route), sw->config.upstream_port_number);
2476
2477 sw->config.enabled = 1;
2478
2479 if (tb_switch_is_usb4(sw)) {
2480 /*
2481 * For USB4 devices, we need to program the CM version
2482 * accordingly so that it knows to expose all the
2483 * additional capabilities. Program it according to USB4
2484 * version to avoid changing existing (v1) routers' behaviour.
2485 */
2486 if (usb4_switch_version(sw) < 2)
2487 sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
2488 else
2489 sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
2490 sw->config.plug_events_delay = 0xa;
2491
2492 /* Enumerate the switch */
2493 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2494 ROUTER_CS_1, 4);
2495 if (ret)
2496 return ret;
2497
2498 ret = usb4_switch_setup(sw);
2499 } else {
2500 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2501 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2502 sw->config.vendor_id);
2503
2504 if (!sw->cap_plug_events) {
2505 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2506 return -ENODEV;
2507 }
2508
2509 /* Enumerate the switch */
2510 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2511 ROUTER_CS_1, 3);
2512 }
2513 if (ret)
2514 return ret;
2515
2516 return tb_plug_events_active(sw, true);
2517}
2518
2519/**
2520 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
2521 * @sw: Router to configure
2522 *
2523 * Needs to be called before any tunnels can be set up through the
2524 * router. Can be called for any router.
2525 *
2526 * Returns %0 on success and negative errno otherwise.
2527 */
2528int tb_switch_configuration_valid(struct tb_switch *sw)
2529{
2530 if (tb_switch_is_usb4(sw))
2531 return usb4_switch_configuration_valid(sw);
2532 return 0;
2533}
2534
2535static int tb_switch_set_uuid(struct tb_switch *sw)
2536{
2537 bool uid = false;
2538 u32 uuid[4];
2539 int ret;
2540
2541 if (sw->uuid)
2542 return 0;
2543
2544 if (tb_switch_is_usb4(sw)) {
2545 ret = usb4_switch_read_uid(sw, &sw->uid);
2546 if (ret)
2547 return ret;
2548 uid = true;
2549 } else {
2550 /*
2551 * The newer controllers include a fused UUID as part of the
2552 * link controller specific registers
2553 */
2554 ret = tb_lc_read_uuid(sw, uuid);
2555 if (ret) {
2556 if (ret != -EINVAL)
2557 return ret;
2558 uid = true;
2559 }
2560 }
2561
2562 if (uid) {
2563 /*
2564 * ICM generates UUID based on UID and fills the upper
2565 * two words with ones. This does not strictly follow the
2566 * UUID format but we want to be compatible with it, so we
2567 * do the same here.
2568 */
2569 uuid[0] = sw->uid & 0xffffffff;
2570 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2571 uuid[2] = 0xffffffff;
2572 uuid[3] = 0xffffffff;
2573 }
2574
2575 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2576 if (!sw->uuid)
2577 return -ENOMEM;
2578 return 0;
2579}
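
/*
 * Worked example (illustrative): if the UID read from the hardware is
 * 0x0123456789abcdef and no fused UUID is available, the code above
 * produces uuid[0] = 0x89abcdef, uuid[1] = 0x01234567 and fills
 * uuid[2] = uuid[3] = 0xffffffff, matching the ICM-generated UUID for
 * the same router.
 */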
2580
2581static int tb_switch_add_dma_port(struct tb_switch *sw)
2582{
2583 u32 status;
2584 int ret;
2585
2586 switch (sw->generation) {
2587 case 2:
2588 /* Only root switch can be upgraded */
2589 if (tb_route(sw))
2590 return 0;
2591
2592 fallthrough;
2593 case 3:
2594 case 4:
2595 ret = tb_switch_set_uuid(sw);
2596 if (ret)
2597 return ret;
2598 break;
2599
2600 default:
2601 /*
2602 * DMA port is the only thing available when the switch
2603 * is in safe mode.
2604 */
2605 if (!sw->safe_mode)
2606 return 0;
2607 break;
2608 }
2609
2610 if (sw->no_nvm_upgrade)
2611 return 0;
2612
2613 if (tb_switch_is_usb4(sw)) {
2614 ret = usb4_switch_nvm_authenticate_status(sw, &status);
2615 if (ret)
2616 return ret;
2617
2618 if (status) {
2619 tb_sw_info(sw, "switch flash authentication failed\n");
2620 nvm_set_auth_status(sw, status);
2621 }
2622
2623 return 0;
2624 }
2625
2626 /* Root switch DMA port requires running firmware */
2627 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2628 return 0;
2629
2630 sw->dma_port = dma_port_alloc(sw);
2631 if (!sw->dma_port)
2632 return 0;
2633
2634 /*
2635 * If there is a status already set then authentication failed
2636 * when dma_port_flash_update_auth() returned. Power cycling
2637 * is not needed (it was done already) so the only thing we do
2638 * here is to unblock runtime PM of the root port.
2639 */
2640 nvm_get_auth_status(sw, &status);
2641 if (status) {
2642 if (!tb_route(sw))
2643 nvm_authenticate_complete_dma_port(sw);
2644 return 0;
2645 }
2646
2647 /*
2648 * Check the status of the previous flash authentication. If there
2649 * was one, we need to power cycle the switch in any case to make
2650 * it functional again.
2651 */
2652 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2653 if (ret <= 0)
2654 return ret;
2655
2656 /* Now we can allow root port to suspend again */
2657 if (!tb_route(sw))
2658 nvm_authenticate_complete_dma_port(sw);
2659
2660 if (status) {
2661 tb_sw_info(sw, "switch flash authentication failed\n");
2662 nvm_set_auth_status(sw, status);
2663 }
2664
2665 tb_sw_info(sw, "power cycling the switch now\n");
2666 dma_port_power_cycle(sw->dma_port);
2667
2668 /*
2669 * We return an error here which causes adding the switch to fail.
2670 * It should appear back after the power cycle is complete.
2671 */
2672 return -ESHUTDOWN;
2673}
2674
2675static void tb_switch_default_link_ports(struct tb_switch *sw)
2676{
2677 int i;
2678
2679 for (i = 1; i <= sw->config.max_port_number; i++) {
2680 struct tb_port *port = &sw->ports[i];
2681 struct tb_port *subordinate;
2682
2683 if (!tb_port_is_null(port))
2684 continue;
2685
2686 /* Check for the subordinate port */
2687 if (i == sw->config.max_port_number ||
2688 !tb_port_is_null(&sw->ports[i + 1]))
2689 continue;
2690
2691 /* Link them if not already done so (by DROM) */
2692 subordinate = &sw->ports[i + 1];
2693 if (!port->dual_link_port && !subordinate->dual_link_port) {
2694 port->link_nr = 0;
2695 port->dual_link_port = subordinate;
2696 subordinate->link_nr = 1;
2697 subordinate->dual_link_port = port;
2698
2699 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2700 port->port, subordinate->port);
2701 }
2702 }
2703}
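
/*
 * Example (illustrative): on a router whose ports 1-4 are all lane
 * adapters and whose DROM carried no dual-link information, the loop
 * above pairs port 1 (link_nr 0) with port 2 (link_nr 1) and port 3
 * with port 4. Ports that are already paired are skipped on later
 * iterations because dual_link_port is then non-NULL.
 */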
2704
2705static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2706{
2707 const struct tb_port *up = tb_upstream_port(sw);
2708
2709 if (!up->dual_link_port || !up->dual_link_port->remote)
2710 return false;
2711
2712 if (tb_switch_is_usb4(sw))
2713 return usb4_switch_lane_bonding_possible(sw);
2714 return tb_lc_lane_bonding_possible(sw);
2715}
2716
2717static int tb_switch_update_link_attributes(struct tb_switch *sw)
2718{
2719 struct tb_port *up;
2720 bool change = false;
2721 int ret;
2722
2723 if (!tb_route(sw) || tb_switch_is_icm(sw))
2724 return 0;
2725
2726 up = tb_upstream_port(sw);
2727
2728 ret = tb_port_get_link_speed(up);
2729 if (ret < 0)
2730 return ret;
2731 if (sw->link_speed != ret)
2732 change = true;
2733 sw->link_speed = ret;
2734
2735 ret = tb_port_get_link_width(up);
2736 if (ret < 0)
2737 return ret;
2738 if (sw->link_width != ret)
2739 change = true;
2740 sw->link_width = ret;
2741
2742 /* Notify userspace that there is a possible link attribute change */
2743 if (device_is_registered(&sw->dev) && change)
2744 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2745
2746 return 0;
2747}
2748
2749/* Must be called after tb_switch_update_link_attributes() */
2750static void tb_switch_link_init(struct tb_switch *sw)
2751{
2752 struct tb_port *up, *down;
2753 bool bonded;
2754
2755 if (!tb_route(sw) || tb_switch_is_icm(sw))
2756 return;
2757
2758 tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
2759 tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width));
2760
2761 bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
2762
2763 /*
2764 * Gen 4 links come up as bonded so update the port structures
2765 * accordingly.
2766 */
2767 up = tb_upstream_port(sw);
2768 down = tb_switch_downstream_port(sw);
2769
2770 up->bonded = bonded;
2771 if (up->dual_link_port)
2772 up->dual_link_port->bonded = bonded;
2773 tb_port_update_credits(up);
2774
2775 down->bonded = bonded;
2776 if (down->dual_link_port)
2777 down->dual_link_port->bonded = bonded;
2778 tb_port_update_credits(down);
2779
2780 if (tb_port_get_link_generation(up) < 4)
2781 return;
2782
2783 /*
2784 * Set the Gen 4 preferred link width. This is what the router
2785 * prefers when the link is brought up. If the router does not
2786 * support asymmetric link configuration, this also will be set
2787 * to TB_LINK_WIDTH_DUAL.
2788 */
2789 sw->preferred_link_width = sw->link_width;
2790 tb_sw_dbg(sw, "preferred link width %s\n",
2791 tb_width_name(sw->preferred_link_width));
2792}
2793
2794/**
2795 * tb_switch_lane_bonding_enable() - Enable lane bonding
2796 * @sw: Switch to enable lane bonding
2797 *
2798 * Connection manager can call this function to enable lane bonding of a
2799 * switch. If conditions are correct and both switches support the feature,
2800 * lanes are bonded. It is safe to call this for any switch.
2801 */
2802static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2803{
2804 struct tb_port *up, *down;
2805 unsigned int width;
2806 int ret;
2807
2808 if (!tb_switch_lane_bonding_possible(sw))
2809 return 0;
2810
2811 up = tb_upstream_port(sw);
2812 down = tb_switch_downstream_port(sw);
2813
2814 if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
2815 !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
2816 return 0;
2817
2818 /*
2819 * Both lanes need to be in CL0. Here we assume lane 0 is already in
2820 * CL0 and check just for lane 1.
2821 */
2822 if (tb_wait_for_port(down->dual_link_port, false) <= 0)
2823 return -ENOTCONN;
2824
2825 ret = tb_port_lane_bonding_enable(up);
2826 if (ret) {
2827 tb_port_warn(up, "failed to enable lane bonding\n");
2828 return ret;
2829 }
2830
2831 ret = tb_port_lane_bonding_enable(down);
2832 if (ret) {
2833 tb_port_warn(down, "failed to enable lane bonding\n");
2834 tb_port_lane_bonding_disable(up);
2835 return ret;
2836 }
2837
2838 /* Any of these widths means the link is bonded */
2839 width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
2840 TB_LINK_WIDTH_ASYM_RX;
2841
2842 return tb_port_wait_for_link_width(down, width, 100);
2843}
2844
2845/**
2846 * tb_switch_lane_bonding_disable() - Disable lane bonding
2847 * @sw: Switch whose lane bonding to disable
2848 *
2849 * Disables lane bonding between @sw and parent. This can be called even
2850 * if lanes were not bonded originally.
2851 */
2852static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
2853{
2854 struct tb_port *up, *down;
2855 int ret;
2856
2857 up = tb_upstream_port(sw);
2858 if (!up->bonded)
2859 return 0;
2860
2861 /*
2862 * If the link is Gen 4 there is no way to switch the link to
2863 * two single lane links so avoid that here. Also don't bother
2864 * if the link is not up anymore (sw is unplugged).
2865 */
2866 ret = tb_port_get_link_generation(up);
2867 if (ret < 0)
2868 return ret;
2869 if (ret >= 4)
2870 return -EOPNOTSUPP;
2871
2872 down = tb_switch_downstream_port(sw);
2873 tb_port_lane_bonding_disable(up);
2874 tb_port_lane_bonding_disable(down);
2875
2876 /*
2877 * It is fine if we get other errors as the router might have
2878 * been unplugged.
2879 */
2880 return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
2881}
2882
2883 /* Note: sw->link_width is updated in tb_switch_update_link_attributes() */
2884static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
2885{
2886 struct tb_port *up, *down, *port;
2887 enum tb_link_width down_width;
2888 int ret;
2889
2890 up = tb_upstream_port(sw);
2891 down = tb_switch_downstream_port(sw);
2892
2893 if (width == TB_LINK_WIDTH_ASYM_TX) {
2894 down_width = TB_LINK_WIDTH_ASYM_RX;
2895 port = down;
2896 } else {
2897 down_width = TB_LINK_WIDTH_ASYM_TX;
2898 port = up;
2899 }
2900
2901 ret = tb_port_set_link_width(up, width);
2902 if (ret)
2903 return ret;
2904
2905 ret = tb_port_set_link_width(down, down_width);
2906 if (ret)
2907 return ret;
2908
2909 /*
2910 * Initiate the change in the router on whose side a TX lane is
2911 * changing to RX, but do so only if there is an actual change.
2912 */
2913 if (sw->link_width != width) {
2914 ret = usb4_port_asym_start(port);
2915 if (ret)
2916 return ret;
2917
2918 ret = tb_port_wait_for_link_width(up, width, 100);
2919 if (ret)
2920 return ret;
2921 }
2922
2923 return 0;
2924}
2925
2926 /* Note: sw->link_width is updated in tb_switch_update_link_attributes() */
2927static int tb_switch_asym_disable(struct tb_switch *sw)
2928{
2929 struct tb_port *up, *down;
2930 int ret;
2931
2932 up = tb_upstream_port(sw);
2933 down = tb_switch_downstream_port(sw);
2934
2935 ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
2936 if (ret)
2937 return ret;
2938
2939 ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
2940 if (ret)
2941 return ret;
2942
2943 /*
2944 * Initiate the change in the router that currently has three TX
2945 * lanes and is changing one of them back to RX, but only if the
2946 * link width actually changes.
2947 */
2948 if (sw->link_width > TB_LINK_WIDTH_DUAL) {
2949 if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
2950 ret = usb4_port_asym_start(up);
2951 else
2952 ret = usb4_port_asym_start(down);
2953 if (ret)
2954 return ret;
2955
2956 ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
2957 if (ret)
2958 return ret;
2959 }
2960
2961 return 0;
2962}
2963
2964/**
2965 * tb_switch_set_link_width() - Configure router link width
2966 * @sw: Router to configure
2967 * @width: The new link width
2968 *
2969 * Set device router link width to @width from router upstream port
2970 * perspective. Also supports asymmetric links if both ends of the
2971 * link support it.
2972 *
2973 * Does nothing for host router.
2974 *
2975 * Returns %0 in case of success, negative errno otherwise.
2976 */
2977int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
2978{
2979 struct tb_port *up, *down;
2980 int ret = 0;
2981
2982 if (!tb_route(sw))
2983 return 0;
2984
2985 up = tb_upstream_port(sw);
2986 down = tb_switch_downstream_port(sw);
2987
2988 switch (width) {
2989 case TB_LINK_WIDTH_SINGLE:
2990 ret = tb_switch_lane_bonding_disable(sw);
2991 break;
2992
2993 case TB_LINK_WIDTH_DUAL:
2994 if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
2995 sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
2996 ret = tb_switch_asym_disable(sw);
2997 if (ret)
2998 break;
2999 }
3000 ret = tb_switch_lane_bonding_enable(sw);
3001 break;
3002
3003 case TB_LINK_WIDTH_ASYM_TX:
3004 case TB_LINK_WIDTH_ASYM_RX:
3005 ret = tb_switch_asym_enable(sw, width);
3006 break;
3007 }
3008
3009 switch (ret) {
3010 case 0:
3011 break;
3012
3013 case -ETIMEDOUT:
3014 tb_sw_warn(sw, "timeout changing link width\n");
3015 return ret;
3016
3017 case -ENOTCONN:
3018 case -EOPNOTSUPP:
3019 case -ENODEV:
3020 return ret;
3021
3022 default:
3023 tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
3024 return ret;
3025 }
3026
3027 tb_port_update_credits(down);
3028 tb_port_update_credits(up);
3029
3030 tb_switch_update_link_attributes(sw);
3031
3032 tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width));
3033 return ret;
3034}
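
/*
 * Illustrative sketch (not part of the driver): a caller that prefers
 * extra TX bandwidth towards the device could request an asymmetric
 * width and fall back to the symmetric dual-lane width when asymmetry
 * is not supported. The wrapper is an assumption made for the example.
 */
static int __maybe_unused example_prefer_asym_tx(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_set_link_width(sw, TB_LINK_WIDTH_ASYM_TX);
	if (ret == -EOPNOTSUPP || ret == -ENOTCONN)
		ret = tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
	return ret;
}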
3035
3036/**
3037 * tb_switch_configure_link() - Set link configured
3038 * @sw: Switch whose link is configured
3039 *
3040 * Sets the link upstream from @sw configured (from both ends) so that
3041 * it will not be disconnected when the domain exits sleep. Can be
3042 * called for any switch.
3043 *
3044 * It is recommended that this is called after lane bonding is enabled.
3045 *
3046 * Returns %0 on success and negative errno in case of error.
3047 */
3048int tb_switch_configure_link(struct tb_switch *sw)
3049{
3050 struct tb_port *up, *down;
3051 int ret;
3052
3053 if (!tb_route(sw) || tb_switch_is_icm(sw))
3054 return 0;
3055
3056 up = tb_upstream_port(sw);
3057 if (tb_switch_is_usb4(up->sw))
3058 ret = usb4_port_configure(up);
3059 else
3060 ret = tb_lc_configure_port(up);
3061 if (ret)
3062 return ret;
3063
3064 down = up->remote;
3065 if (tb_switch_is_usb4(down->sw))
3066 return usb4_port_configure(down);
3067 return tb_lc_configure_port(down);
3068}
3069
3070/**
3071 * tb_switch_unconfigure_link() - Unconfigure link
3072 * @sw: Switch whose link is unconfigured
3073 *
3074 * Sets the link unconfigured so the @sw will be disconnected if the
3075 * domain exits sleep.
3076 */
3077void tb_switch_unconfigure_link(struct tb_switch *sw)
3078{
3079 struct tb_port *up, *down;
3080
3081 if (sw->is_unplugged)
3082 return;
3083 if (!tb_route(sw) || tb_switch_is_icm(sw))
3084 return;
3085
3086 up = tb_upstream_port(sw);
3087 if (tb_switch_is_usb4(up->sw))
3088 usb4_port_unconfigure(up);
3089 else
3090 tb_lc_unconfigure_port(up);
3091
3092 down = up->remote;
3093 if (tb_switch_is_usb4(down->sw))
3094 usb4_port_unconfigure(down);
3095 else
3096 tb_lc_unconfigure_port(down);
3097}
3098
3099static void tb_switch_credits_init(struct tb_switch *sw)
3100{
3101 if (tb_switch_is_icm(sw))
3102 return;
3103 if (!tb_switch_is_usb4(sw))
3104 return;
3105 if (usb4_switch_credits_init(sw))
3106 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
3107}
3108
3109static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
3110{
3111 struct tb_port *port;
3112
3113 if (tb_switch_is_icm(sw))
3114 return 0;
3115
3116 tb_switch_for_each_port(sw, port) {
3117 int res;
3118
3119 if (!port->cap_usb4)
3120 continue;
3121
3122 res = usb4_port_hotplug_enable(port);
3123 if (res)
3124 return res;
3125 }
3126 return 0;
3127}
3128
3129/**
3130 * tb_switch_add() - Add a switch to the domain
3131 * @sw: Switch to add
3132 *
3133 * This is the last step in adding a switch to the domain. It reads
3134 * identification information from the DROM and initializes ports so that
3135 * they can be used to connect other switches. The switch will be
3136 * exposed to the userspace when this function successfully returns. To
3137 * remove and release the switch, call tb_switch_remove().
3138 *
3139 * Return: %0 in case of success and negative errno in case of failure
3140 */
3141int tb_switch_add(struct tb_switch *sw)
3142{
3143 int i, ret;
3144
3145 /*
3146 * Initialize the DMA control port now, before we read the DROM.
3147 * Recent host controllers have a more complete DROM in NVM that
3148 * includes vendor and model identification strings which we then
3149 * expose to userspace. The NVM can be accessed through a DMA
3150 * configuration based mailbox.
3151 */
3152 ret = tb_switch_add_dma_port(sw);
3153 if (ret) {
3154 dev_err(&sw->dev, "failed to add DMA port\n");
3155 return ret;
3156 }
3157
3158 if (!sw->safe_mode) {
3159 tb_switch_credits_init(sw);
3160
3161 /* read drom */
3162 ret = tb_drom_read(sw);
3163 if (ret)
3164 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
3165 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
3166
3167 ret = tb_switch_set_uuid(sw);
3168 if (ret) {
3169 dev_err(&sw->dev, "failed to set UUID\n");
3170 return ret;
3171 }
3172
3173 for (i = 0; i <= sw->config.max_port_number; i++) {
3174 if (sw->ports[i].disabled) {
3175 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
3176 continue;
3177 }
3178 ret = tb_init_port(&sw->ports[i]);
3179 if (ret) {
3180 dev_err(&sw->dev, "failed to initialize port %d\n", i);
3181 return ret;
3182 }
3183 }
3184
3185 tb_check_quirks(sw);
3186
3187 tb_switch_default_link_ports(sw);
3188
3189 ret = tb_switch_update_link_attributes(sw);
3190 if (ret)
3191 return ret;
3192
3193 tb_switch_link_init(sw);
3194
3195 ret = tb_switch_clx_init(sw);
3196 if (ret)
3197 return ret;
3198
3199 ret = tb_switch_tmu_init(sw);
3200 if (ret)
3201 return ret;
3202 }
3203
3204 ret = tb_switch_port_hotplug_enable(sw);
3205 if (ret)
3206 return ret;
3207
3208 ret = device_add(&sw->dev);
3209 if (ret) {
3210 dev_err(&sw->dev, "failed to add device: %d\n", ret);
3211 return ret;
3212 }
3213
3214 if (tb_route(sw)) {
3215 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
3216 sw->vendor, sw->device);
3217 if (sw->vendor_name && sw->device_name)
3218 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
3219 sw->device_name);
3220 }
3221
3222 ret = usb4_switch_add_ports(sw);
3223 if (ret) {
3224 dev_err(&sw->dev, "failed to add USB4 ports\n");
3225 goto err_del;
3226 }
3227
3228 ret = tb_switch_nvm_add(sw);
3229 if (ret) {
3230 dev_err(&sw->dev, "failed to add NVM devices\n");
3231 goto err_ports;
3232 }
3233
3234 /*
3235 * Thunderbolt routers do not generate wakeups themselves but
3236 * they forward wakeups from tunneled protocols, so enable wakeup
3237 * support here.
3238 */
3239 device_init_wakeup(&sw->dev, true);
3240
3241 pm_runtime_set_active(&sw->dev);
3242 if (sw->rpm) {
3243 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
3244 pm_runtime_use_autosuspend(&sw->dev);
3245 pm_runtime_mark_last_busy(&sw->dev);
3246 pm_runtime_enable(&sw->dev);
3247 pm_request_autosuspend(&sw->dev);
3248 }
3249
3250 tb_switch_debugfs_init(sw);
3251 return 0;
3252
3253err_ports:
3254 usb4_switch_remove_ports(sw);
3255err_del:
3256 device_del(&sw->dev);
3257
3258 return ret;
3259}
3260
3261/**
3262 * tb_switch_remove() - Remove and release a switch
3263 * @sw: Switch to remove
3264 *
3265 * This will remove the switch from the domain and release it once the
3266 * last reference drops to zero. If there are switches connected below
3267 * this switch, they will be removed as well.
3268 */
3269void tb_switch_remove(struct tb_switch *sw)
3270{
3271 struct tb_port *port;
3272
3273 tb_switch_debugfs_remove(sw);
3274
3275 if (sw->rpm) {
3276 pm_runtime_get_sync(&sw->dev);
3277 pm_runtime_disable(&sw->dev);
3278 }
3279
3280 /* port 0 is the switch itself and never has a remote */
3281 tb_switch_for_each_port(sw, port) {
3282 if (tb_port_has_remote(port)) {
3283 tb_switch_remove(port->remote->sw);
3284 port->remote = NULL;
3285 } else if (port->xdomain) {
3286 tb_xdomain_remove(port->xdomain);
3287 port->xdomain = NULL;
3288 }
3289
3290 /* Remove any downstream retimers */
3291 tb_retimer_remove_all(port);
3292 }
3293
3294 if (!sw->is_unplugged)
3295 tb_plug_events_active(sw, false);
3296
3297 tb_switch_nvm_remove(sw);
3298 usb4_switch_remove_ports(sw);
3299
3300 if (tb_route(sw))
3301 dev_info(&sw->dev, "device disconnected\n");
3302 device_unregister(&sw->dev);
3303}
3304
3305/**
3306 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
3307 * @sw: Router to mark unplugged
3308 */
3309void tb_sw_set_unplugged(struct tb_switch *sw)
3310{
3311 struct tb_port *port;
3312
3313 if (sw == sw->tb->root_switch) {
3314 tb_sw_WARN(sw, "cannot unplug root switch\n");
3315 return;
3316 }
3317 if (sw->is_unplugged) {
3318 tb_sw_WARN(sw, "is_unplugged already set\n");
3319 return;
3320 }
3321 sw->is_unplugged = true;
3322 tb_switch_for_each_port(sw, port) {
3323 if (tb_port_has_remote(port))
3324 tb_sw_set_unplugged(port->remote->sw);
3325 else if (port->xdomain)
3326 port->xdomain->is_unplugged = true;
3327 }
3328}
3329
3330static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
3331{
3332 if (flags)
3333 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
3334 else
3335 tb_sw_dbg(sw, "disabling wakeup\n");
3336
3337 if (tb_switch_is_usb4(sw))
3338 return usb4_switch_set_wake(sw, flags);
3339 return tb_lc_set_wake(sw, flags);
3340}
3341
3342int tb_switch_resume(struct tb_switch *sw)
3343{
3344 struct tb_port *port;
3345 int err;
3346
3347 tb_sw_dbg(sw, "resuming switch\n");
3348
3349 /*
3350 * Check for UID of the connected switches except for root
3351 * switch which we assume cannot be removed.
3352 */
3353 if (tb_route(sw)) {
3354 u64 uid;
3355
3356 /*
3357 * Check first that we can still read the switch config
3358 * space. It may be that there is now another domain
3359 * connected.
3360 */
3361 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
3362 if (err < 0) {
3363 tb_sw_info(sw, "switch not present anymore\n");
3364 return err;
3365 }
3366
3367 /* We don't have any way to confirm this was the same device */
3368 if (!sw->uid)
3369 return -ENODEV;
3370
3371 if (tb_switch_is_usb4(sw))
3372 err = usb4_switch_read_uid(sw, &uid);
3373 else
3374 err = tb_drom_read_uid_only(sw, &uid);
3375 if (err) {
3376 tb_sw_warn(sw, "uid read failed\n");
3377 return err;
3378 }
3379 if (sw->uid != uid) {
3380 tb_sw_info(sw,
3381 "changed while suspended (uid %#llx -> %#llx)\n",
3382 sw->uid, uid);
3383 return -ENODEV;
3384 }
3385 }
3386
3387 err = tb_switch_configure(sw);
3388 if (err)
3389 return err;
3390
3391 /* Disable wakes */
3392 tb_switch_set_wake(sw, 0);
3393
3394 err = tb_switch_tmu_init(sw);
3395 if (err)
3396 return err;
3397
3398 /* check for surviving downstream switches */
3399 tb_switch_for_each_port(sw, port) {
3400 if (!tb_port_is_null(port))
3401 continue;
3402
3403 if (!tb_port_resume(port))
3404 continue;
3405
3406 if (tb_wait_for_port(port, true) <= 0) {
3407 tb_port_warn(port,
3408 "lost during suspend, disconnecting\n");
3409 if (tb_port_has_remote(port))
3410 tb_sw_set_unplugged(port->remote->sw);
3411 else if (port->xdomain)
3412 port->xdomain->is_unplugged = true;
3413 } else {
3414 /*
3415 * Always unlock the port so the downstream
3416 * switch/domain is accessible.
3417 */
3418 if (tb_port_unlock(port))
3419 tb_port_warn(port, "failed to unlock port\n");
3420 if (port->remote && tb_switch_resume(port->remote->sw)) {
3421 tb_port_warn(port,
3422 "lost during suspend, disconnecting\n");
3423 tb_sw_set_unplugged(port->remote->sw);
3424 }
3425 }
3426 }
3427 return 0;
3428}
3429
3430/**
3431 * tb_switch_suspend() - Put a switch to sleep
3432 * @sw: Switch to suspend
3433 * @runtime: Is this runtime suspend or system sleep
3434 *
3435 * Suspends the router and all its children. Enables wakes according to
3436 * the value of @runtime and then sets the sleep bit for the router. If
3437 * @sw is the host router, the domain is ready to go to sleep once this
3438 * function returns.
3439 */
3440void tb_switch_suspend(struct tb_switch *sw, bool runtime)
3441{
3442 unsigned int flags = 0;
3443 struct tb_port *port;
3444 int err;
3445
3446 tb_sw_dbg(sw, "suspending switch\n");
3447
3448 /*
3449 * Actually only needed for Titan Ridge but for simplicity can be
3450 * done for USB4 devices too as CLx is re-enabled at resume.
3451 */
3452 tb_switch_clx_disable(sw);
3453
3454 err = tb_plug_events_active(sw, false);
3455 if (err)
3456 return;
3457
3458 tb_switch_for_each_port(sw, port) {
3459 if (tb_port_has_remote(port))
3460 tb_switch_suspend(port->remote->sw, runtime);
3461 }
3462
3463 if (runtime) {
3464 /* Trigger wake when something is plugged in/out */
3465 flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
3466 flags |= TB_WAKE_ON_USB4;
3467 flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
3468 } else if (device_may_wakeup(&sw->dev)) {
3469 flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
3470 }
3471
3472 tb_switch_set_wake(sw, flags);
3473
3474 if (tb_switch_is_usb4(sw))
3475 usb4_switch_set_sleep(sw);
3476 else
3477 tb_lc_set_sleep(sw);
3478}
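
/*
 * Worked example (illustrative): for runtime suspend the wake mask set
 * above becomes TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT |
 * TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP,
 * whereas for system sleep only the USB4/USB3/PCIe protocol wakes are
 * enabled, and only if the device is allowed to wake the system.
 */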
3479
3480/**
3481 * tb_switch_query_dp_resource() - Query availability of DP resource
3482 * @sw: Switch whose DP resource is queried
3483 * @in: DP IN port
3484 *
3485 * Queries availability of the DP resource for DP tunneling using
3486 * switch-specific means. Returns %true if the resource is available.
3487 */
3488bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
3489{
3490 if (tb_switch_is_usb4(sw))
3491 return usb4_switch_query_dp_resource(sw, in);
3492 return tb_lc_dp_sink_query(sw, in);
3493}
3494
3495/**
3496 * tb_switch_alloc_dp_resource() - Allocate available DP resource
3497 * @sw: Switch whose DP resource is allocated
3498 * @in: DP IN port
3499 *
3500 * Allocates DP resource for DP tunneling. The resource must be
3501 * available for this to succeed (see tb_switch_query_dp_resource()).
3502 * Returns %0 on success and negative errno otherwise.
3503 */
3504int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3505{
3506 int ret;
3507
3508 if (tb_switch_is_usb4(sw))
3509 ret = usb4_switch_alloc_dp_resource(sw, in);
3510 else
3511 ret = tb_lc_dp_sink_alloc(sw, in);
3512
3513 if (ret)
3514 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
3515 in->port);
3516 else
3517 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
3518
3519 return ret;
3520}
3521
3522/**
3523 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
3524 * @sw: Switch whose DP resource is de-allocated
3525 * @in: DP IN port
3526 *
3527 * De-allocates DP resource that was previously allocated for DP
3528 * tunneling.
3529 */
3530void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
3531{
3532 int ret;
3533
3534 if (tb_switch_is_usb4(sw))
3535 ret = usb4_switch_dealloc_dp_resource(sw, in);
3536 else
3537 ret = tb_lc_dp_sink_dealloc(sw, in);
3538
3539 if (ret)
3540 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
3541 in->port);
3542 else
3543 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
3544}
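
/*
 * Illustrative sketch (not part of the driver): how the three DP
 * resource helpers above fit together around DP tunnel setup. The
 * wrapper and the omitted tunnel setup step are assumptions made for
 * the example.
 */
static int __maybe_unused example_claim_dp_sink(struct tb_switch *sw,
						struct tb_port *in)
{
	int ret;

	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;	/* sink is already in use */

	ret = tb_switch_alloc_dp_resource(sw, in);
	if (ret)
		return ret;

	/* ... set up the DP tunnel here ... */

	/* Release the sink again once the tunnel is torn down */
	tb_switch_dealloc_dp_resource(sw, in);
	return 0;
}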
3545
3546struct tb_sw_lookup {
3547 struct tb *tb;
3548 u8 link;
3549 u8 depth;
3550 const uuid_t *uuid;
3551 u64 route;
3552};
3553
3554static int tb_switch_match(struct device *dev, const void *data)
3555{
3556 struct tb_switch *sw = tb_to_switch(dev);
3557 const struct tb_sw_lookup *lookup = data;
3558
3559 if (!sw)
3560 return 0;
3561 if (sw->tb != lookup->tb)
3562 return 0;
3563
3564 if (lookup->uuid)
3565 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
3566
3567 if (lookup->route) {
3568 return sw->config.route_lo == lower_32_bits(lookup->route) &&
3569 sw->config.route_hi == upper_32_bits(lookup->route);
3570 }
3571
3572 /* Root switch is matched only by depth */
3573 if (!lookup->depth)
3574 return !sw->depth;
3575
3576 return sw->link == lookup->link && sw->depth == lookup->depth;
3577}
3578
3579/**
3580 * tb_switch_find_by_link_depth() - Find switch by link and depth
3581 * @tb: Domain the switch belongs
3582 * @link: Link number the switch is connected
3583 * @depth: Depth of the switch in link
3584 *
3585 * The returned switch has its reference count increased so the caller
3586 * needs to call tb_switch_put() when done with the switch.
3587 */
3588struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
3589{
3590 struct tb_sw_lookup lookup;
3591 struct device *dev;
3592
3593 memset(&lookup, 0, sizeof(lookup));
3594 lookup.tb = tb;
3595 lookup.link = link;
3596 lookup.depth = depth;
3597
3598 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3599 if (dev)
3600 return tb_to_switch(dev);
3601
3602 return NULL;
3603}
3604
3605/**
3606 * tb_switch_find_by_uuid() - Find switch by UUID
3607 * @tb: Domain the switch belongs
3608 * @uuid: UUID to look for
3609 *
3610 * The returned switch has its reference count increased so the caller
3611 * needs to call tb_switch_put() when done with the switch.
3612 */
3613struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
3614{
3615 struct tb_sw_lookup lookup;
3616 struct device *dev;
3617
3618 memset(&lookup, 0, sizeof(lookup));
3619 lookup.tb = tb;
3620 lookup.uuid = uuid;
3621
3622 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3623 if (dev)
3624 return tb_to_switch(dev);
3625
3626 return NULL;
3627}
3628
3629/**
3630 * tb_switch_find_by_route() - Find switch by route string
3631 * @tb: Domain the switch belongs
3632 * @route: Route string to look for
3633 *
3634 * The returned switch has its reference count increased so the caller
3635 * needs to call tb_switch_put() when done with the switch.
3636 */
3637struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
3638{
3639 struct tb_sw_lookup lookup;
3640 struct device *dev;
3641
3642 if (!route)
3643 return tb_switch_get(tb->root_switch);
3644
3645 memset(&lookup, 0, sizeof(lookup));
3646 lookup.tb = tb;
3647 lookup.route = route;
3648
3649 dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
3650 if (dev)
3651 return tb_to_switch(dev);
3652
3653 return NULL;
3654}
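
/*
 * Illustrative sketch (not part of the driver): every lookup helper
 * above returns the switch with an elevated reference count, so each
 * successful lookup is paired with tb_switch_put().
 */
static void __maybe_unused example_lookup_by_route(struct tb *tb, u64 route)
{
	struct tb_switch *sw;

	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		tb_sw_dbg(sw, "found router at %llx\n", route);
		tb_switch_put(sw);
	}
}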
3655
3656/**
3657 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3658 * @sw: Switch to find the port from
3659 * @type: Port type to look for
3660 */
3661struct tb_port *tb_switch_find_port(struct tb_switch *sw,
3662 enum tb_port_type type)
3663{
3664 struct tb_port *port;
3665
3666 tb_switch_for_each_port(sw, port) {
3667 if (port->config.type == type)
3668 return port;
3669 }
3670
3671 return NULL;
3672}
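
/*
 * Example (illustrative): tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN)
 * returns the first downstream PCIe adapter of @sw, or NULL if the
 * router has none. The port type constant is assumed to be the one the
 * driver uses elsewhere for PCIe downstream adapters.
 */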
3673
3674/*
3675 * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
3676 * device. For now used only for Titan Ridge.
3677 */
3678static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
3679 unsigned int pcie_offset, u32 value)
3680{
3681 u32 offset, command, val;
3682 int ret;
3683
3684 if (sw->generation != 3)
3685 return -EOPNOTSUPP;
3686
3687 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
3688 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
3689 if (ret)
3690 return ret;
3691
3692 command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
3693 command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
3694 command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
3695 command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
3696 << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
3697 command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
3698
3699 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
3700
3701 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
3702 if (ret)
3703 return ret;
3704
3705 ret = tb_switch_wait_for_bit(sw, offset,
3706 TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
3707 if (ret)
3708 return ret;
3709
3710 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
3711 if (ret)
3712 return ret;
3713
3714 if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
3715 return -ETIMEDOUT;
3716
3717 return 0;
3718}
3719
3720/**
3721 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
3722 * @sw: Router to enable PCIe L1
3723 *
3724 * For the Titan Ridge switch to enter the CLx state, its PCIe bridges
3725 * must enable entry to the PCIe L1 state. Must be called after the
3726 * upstream PCIe tunnel has been configured. Due to an Intel platform
3727 * limitation, call this only for the first hop switch.
3728 */
3729int tb_switch_pcie_l1_enable(struct tb_switch *sw)
3730{
3731 struct tb_switch *parent = tb_switch_parent(sw);
3732 int ret;
3733
3734 if (!tb_route(sw))
3735 return 0;
3736
3737 if (!tb_switch_is_titan_ridge(sw))
3738 return 0;
3739
3740 /* Enable PCIe L1 enable only for first hop router (depth = 1) */
3741 if (tb_route(parent))
3742 return 0;
3743
3744 /* Write to downstream PCIe bridge #5 aka Dn4 */
3745 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
3746 if (ret)
3747 return ret;
3748
3749 /* Write to Upstream PCIe bridge #0 aka Up0 */
3750 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
3751}
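
/*
 * Illustrative usage (an assumption, not taken from the driver): the
 * connection manager would call this right after activating the PCIe
 * tunnel to a first-hop Titan Ridge device, e.g.
 *
 *	ret = tb_switch_pcie_l1_enable(sw);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to enable PCIe L1\n");
 *
 * For host routers, non-Titan Ridge routers and deeper hops the
 * function returns %0 without doing anything, so it is safe to call
 * unconditionally.
 */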
3752
3753/**
3754 * tb_switch_xhci_connect() - Connect internal xHCI
3755 * @sw: Router whose xHCI to connect
3756 *
3757 * Can be called for any router. For Alpine Ridge and Titan Ridge this
3758 * performs special flows that make the xHCI functional for any device
3759 * connected to the Type-C port. Call only after the PCIe tunnel has
3760 * been established. The function only does the connect if it has not
3761 * been done already, so it can be called several times for the same
3762 * router.
3762 */
3763int tb_switch_xhci_connect(struct tb_switch *sw)
3764{
3765 struct tb_port *port1, *port3;
3766 int ret;
3767
3768 if (sw->generation != 3)
3769 return 0;
3770
3771 port1 = &sw->ports[1];
3772 port3 = &sw->ports[3];
3773
3774 if (tb_switch_is_alpine_ridge(sw)) {
3775 bool usb_port1, usb_port3, xhci_port1, xhci_port3;
3776
3777 usb_port1 = tb_lc_is_usb_plugged(port1);
3778 usb_port3 = tb_lc_is_usb_plugged(port3);
3779 xhci_port1 = tb_lc_is_xhci_connected(port1);
3780 xhci_port3 = tb_lc_is_xhci_connected(port3);
3781
3782 /* Figure out correct USB port to connect */
3783 if (usb_port1 && !xhci_port1) {
3784 ret = tb_lc_xhci_connect(port1);
3785 if (ret)
3786 return ret;
3787 }
3788 if (usb_port3 && !xhci_port3)
3789 return tb_lc_xhci_connect(port3);
3790 } else if (tb_switch_is_titan_ridge(sw)) {
3791 ret = tb_lc_xhci_connect(port1);
3792 if (ret)
3793 return ret;
3794 return tb_lc_xhci_connect(port3);
3795 }
3796
3797 return 0;
3798}
3799
3800/**
3801 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
3802 * @sw: Router whose xHCI to disconnect
3803 *
3804 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
3805 * ports.
3806 */
3807void tb_switch_xhci_disconnect(struct tb_switch *sw)
3808{
3809 if (sw->generation == 3) {
3810 struct tb_port *port1 = &sw->ports[1];
3811 struct tb_port *port3 = &sw->ports[3];
3812
3813 tb_lc_xhci_disconnect(port1);
3814 tb_port_dbg(port1, "disconnected xHCI\n");
3815 tb_lc_xhci_disconnect(port3);
3816 tb_port_dbg(port3, "disconnected xHCI\n");
3817 }
3818}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt driver - switch/port utility functions
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2018, Intel Corporation
7 */
8
9#include <linux/delay.h>
10#include <linux/idr.h>
11#include <linux/nvmem-provider.h>
12#include <linux/pm_runtime.h>
13#include <linux/sched/signal.h>
14#include <linux/sizes.h>
15#include <linux/slab.h>
16
17#include "tb.h"
18
19/* Switch NVM support */
20
21#define NVM_CSS 0x10
22
23struct nvm_auth_status {
24 struct list_head list;
25 uuid_t uuid;
26 u32 status;
27};
28
29enum nvm_write_ops {
30 WRITE_AND_AUTHENTICATE = 1,
31 WRITE_ONLY = 2,
32};
33
34/*
35 * Hold NVM authentication failure status per switch This information
36 * needs to stay around even when the switch gets power cycled so we
37 * keep it separately.
38 */
39static LIST_HEAD(nvm_auth_status_cache);
40static DEFINE_MUTEX(nvm_auth_status_lock);
41
42static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
43{
44 struct nvm_auth_status *st;
45
46 list_for_each_entry(st, &nvm_auth_status_cache, list) {
47 if (uuid_equal(&st->uuid, sw->uuid))
48 return st;
49 }
50
51 return NULL;
52}
53
54static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
55{
56 struct nvm_auth_status *st;
57
58 mutex_lock(&nvm_auth_status_lock);
59 st = __nvm_get_auth_status(sw);
60 mutex_unlock(&nvm_auth_status_lock);
61
62 *status = st ? st->status : 0;
63}
64
65static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
66{
67 struct nvm_auth_status *st;
68
69 if (WARN_ON(!sw->uuid))
70 return;
71
72 mutex_lock(&nvm_auth_status_lock);
73 st = __nvm_get_auth_status(sw);
74
75 if (!st) {
76 st = kzalloc(sizeof(*st), GFP_KERNEL);
77 if (!st)
78 goto unlock;
79
80 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
81 INIT_LIST_HEAD(&st->list);
82 list_add_tail(&st->list, &nvm_auth_status_cache);
83 }
84
85 st->status = status;
86unlock:
87 mutex_unlock(&nvm_auth_status_lock);
88}
89
90static void nvm_clear_auth_status(const struct tb_switch *sw)
91{
92 struct nvm_auth_status *st;
93
94 mutex_lock(&nvm_auth_status_lock);
95 st = __nvm_get_auth_status(sw);
96 if (st) {
97 list_del(&st->list);
98 kfree(st);
99 }
100 mutex_unlock(&nvm_auth_status_lock);
101}
102
103static int nvm_validate_and_write(struct tb_switch *sw)
104{
105 unsigned int image_size, hdr_size;
106 const u8 *buf = sw->nvm->buf;
107 u16 ds_size;
108 int ret;
109
110 if (!buf)
111 return -EINVAL;
112
113 image_size = sw->nvm->buf_data_size;
114 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
115 return -EINVAL;
116
117 /*
118 * FARB pointer must point inside the image and must at least
119 * contain parts of the digital section we will be reading here.
120 */
121 hdr_size = (*(u32 *)buf) & 0xffffff;
122 if (hdr_size + NVM_DEVID + 2 >= image_size)
123 return -EINVAL;
124
125 /* Digital section start should be aligned to 4k page */
126 if (!IS_ALIGNED(hdr_size, SZ_4K))
127 return -EINVAL;
128
129 /*
130 * Read digital section size and check that it also fits inside
131 * the image.
132 */
133 ds_size = *(u16 *)(buf + hdr_size);
134 if (ds_size >= image_size)
135 return -EINVAL;
136
137 if (!sw->safe_mode) {
138 u16 device_id;
139
140 /*
141 * Make sure the device ID in the image matches the one
142 * we read from the switch config space.
143 */
144 device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
145 if (device_id != sw->config.device_id)
146 return -EINVAL;
147
148 if (sw->generation < 3) {
149 /* Write CSS headers first */
150 ret = dma_port_flash_write(sw->dma_port,
151 DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
152 DMA_PORT_CSS_MAX_SIZE);
153 if (ret)
154 return ret;
155 }
156
157 /* Skip headers in the image */
158 buf += hdr_size;
159 image_size -= hdr_size;
160 }
161
162 if (tb_switch_is_usb4(sw))
163 ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
164 else
165 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
166 if (!ret)
167 sw->nvm->flushed = true;
168 return ret;
169}
170
171static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
172{
173 int ret = 0;
174
175 /*
176 * Root switch NVM upgrade requires that we disconnect the
177 * existing paths first (in case it is not in safe mode
178 * already).
179 */
180 if (!sw->safe_mode) {
181 u32 status;
182
183 ret = tb_domain_disconnect_all_paths(sw->tb);
184 if (ret)
185 return ret;
186 /*
187 * The host controller goes away pretty soon after this if
188 * everything goes well so getting timeout is expected.
189 */
190 ret = dma_port_flash_update_auth(sw->dma_port);
191 if (!ret || ret == -ETIMEDOUT)
192 return 0;
193
194 /*
195 * Any error from update auth operation requires power
196 * cycling of the host router.
197 */
198 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
199 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
200 nvm_set_auth_status(sw, status);
201 }
202
203 /*
204 * From safe mode we can get out by just power cycling the
205 * switch.
206 */
207 dma_port_power_cycle(sw->dma_port);
208 return ret;
209}
210
211static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
212{
213 int ret, retries = 10;
214
215 ret = dma_port_flash_update_auth(sw->dma_port);
216 switch (ret) {
217 case 0:
218 case -ETIMEDOUT:
219 case -EACCES:
220 case -EINVAL:
221 /* Power cycle is required */
222 break;
223 default:
224 return ret;
225 }
226
227 /*
228 * Poll here for the authentication status. It takes some time
229 * for the device to respond (we get timeout for a while). Once
230 * we get response the device needs to be power cycled in order
231 * to the new NVM to be taken into use.
232 */
233 do {
234 u32 status;
235
236 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
237 if (ret < 0 && ret != -ETIMEDOUT)
238 return ret;
239 if (ret > 0) {
240 if (status) {
241 tb_sw_warn(sw, "failed to authenticate NVM\n");
242 nvm_set_auth_status(sw, status);
243 }
244
245 tb_sw_info(sw, "power cycling the switch now\n");
246 dma_port_power_cycle(sw->dma_port);
247 return 0;
248 }
249
250 msleep(500);
251 } while (--retries);
252
253 return -ETIMEDOUT;
254}
255
256static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
257{
258 struct pci_dev *root_port;
259
260 /*
261 * During host router NVM upgrade we should not allow root port to
262 * go into D3cold because some root ports cannot trigger PME
263 * itself. To be on the safe side keep the root port in D0 during
264 * the whole upgrade process.
265 */
266 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
267 if (root_port)
268 pm_runtime_get_noresume(&root_port->dev);
269}
270
271static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
272{
273 struct pci_dev *root_port;
274
275 root_port = pcie_find_root_port(sw->tb->nhi->pdev);
276 if (root_port)
277 pm_runtime_put(&root_port->dev);
278}
279
280static inline bool nvm_readable(struct tb_switch *sw)
281{
282 if (tb_switch_is_usb4(sw)) {
283 /*
284 * USB4 devices must support NVM operations but it is
285 * optional for hosts. Therefore we query the NVM sector
286 * size here and if it is supported assume NVM
287 * operations are implemented.
288 */
289 return usb4_switch_nvm_sector_size(sw) > 0;
290 }
291
292 /* Thunderbolt 2 and 3 devices support NVM through DMA port */
293 return !!sw->dma_port;
294}
295
296static inline bool nvm_upgradeable(struct tb_switch *sw)
297{
298 if (sw->no_nvm_upgrade)
299 return false;
300 return nvm_readable(sw);
301}
302
303static inline int nvm_read(struct tb_switch *sw, unsigned int address,
304 void *buf, size_t size)
305{
306 if (tb_switch_is_usb4(sw))
307 return usb4_switch_nvm_read(sw, address, buf, size);
308 return dma_port_flash_read(sw->dma_port, address, buf, size);
309}
310
311static int nvm_authenticate(struct tb_switch *sw)
312{
313 int ret;
314
315 if (tb_switch_is_usb4(sw))
316 return usb4_switch_nvm_authenticate(sw);
317
318 if (!tb_route(sw)) {
319 nvm_authenticate_start_dma_port(sw);
320 ret = nvm_authenticate_host_dma_port(sw);
321 } else {
322 ret = nvm_authenticate_device_dma_port(sw);
323 }
324
325 return ret;
326}
327
328static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
329 size_t bytes)
330{
331 struct tb_nvm *nvm = priv;
332 struct tb_switch *sw = tb_to_switch(nvm->dev);
333 int ret;
334
335 pm_runtime_get_sync(&sw->dev);
336
337 if (!mutex_trylock(&sw->tb->lock)) {
338 ret = restart_syscall();
339 goto out;
340 }
341
342 ret = nvm_read(sw, offset, val, bytes);
343 mutex_unlock(&sw->tb->lock);
344
345out:
346 pm_runtime_mark_last_busy(&sw->dev);
347 pm_runtime_put_autosuspend(&sw->dev);
348
349 return ret;
350}
351
352static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
353 size_t bytes)
354{
355 struct tb_nvm *nvm = priv;
356 struct tb_switch *sw = tb_to_switch(nvm->dev);
357 int ret;
358
359 if (!mutex_trylock(&sw->tb->lock))
360 return restart_syscall();
361
362 /*
363 * Since writing the NVM image might require some special steps,
364 * for example when CSS headers are written, we cache the image
365 * locally here and handle the special cases when the user asks
366 * us to authenticate the image.
367 */
368 ret = tb_nvm_write_buf(nvm, offset, val, bytes);
369 mutex_unlock(&sw->tb->lock);
370
371 return ret;
372}
373
374static int tb_switch_nvm_add(struct tb_switch *sw)
375{
376 struct tb_nvm *nvm;
377 u32 val;
378 int ret;
379
380 if (!nvm_readable(sw))
381 return 0;
382
383 /*
384 * The NVM format of non-Intel hardware is not known so
385 * currently restrict NVM upgrade for Intel hardware. We may
386 * relax this in the future when we learn other NVM formats.
387 */
388 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
389 sw->config.vendor_id != 0x8087) {
390 dev_info(&sw->dev,
391 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
392 sw->config.vendor_id);
393 return 0;
394 }
395
396 nvm = tb_nvm_alloc(&sw->dev);
397 if (IS_ERR(nvm))
398 return PTR_ERR(nvm);
399
400 /*
401 * If the switch is in safe-mode the only accessible portion of
402 * the NVM is the non-active one where userspace is expected to
403 * write new functional NVM.
404 */
405 if (!sw->safe_mode) {
406 u32 nvm_size, hdr_size;
407
408 ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
409 if (ret)
410 goto err_nvm;
411
412 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
413 nvm_size = (SZ_1M << (val & 7)) / 8;
414 nvm_size = (nvm_size - hdr_size) / 2;
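		/*
		 * Worked example (assuming the size field encodes the
		 * flash size in Mbit as a power of two, which the
		 * conversion above implies): (val & 7) == 5 means a
		 * 32 Mbit (4 MB) flash. A generation 3 router reserves
		 * a 16 KB header and the rest is split evenly, so each
		 * of the active and non-active regions is about 2 MB.
		 */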
415
416 ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
417 if (ret)
418 goto err_nvm;
419
420 nvm->major = val >> 16;
421 nvm->minor = val >> 8;
422
423 ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
424 if (ret)
425 goto err_nvm;
426 }
427
428 if (!sw->no_nvm_upgrade) {
429 ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
430 tb_switch_nvm_write);
431 if (ret)
432 goto err_nvm;
433 }
434
435 sw->nvm = nvm;
436 return 0;
437
438err_nvm:
439 tb_nvm_free(nvm);
440 return ret;
441}
442
443static void tb_switch_nvm_remove(struct tb_switch *sw)
444{
445 struct tb_nvm *nvm;
446
447 nvm = sw->nvm;
448 sw->nvm = NULL;
449
450 if (!nvm)
451 return;
452
453 /* Remove authentication status in case the switch is unplugged */
454 if (!nvm->authenticating)
455 nvm_clear_auth_status(sw);
456
457 tb_nvm_free(nvm);
458}
459
460/* port utility functions */
461
462static const char *tb_port_type(struct tb_regs_port_header *port)
463{
464 switch (port->type >> 16) {
465 case 0:
466 switch ((u8) port->type) {
467 case 0:
468 return "Inactive";
469 case 1:
470 return "Port";
471 case 2:
472 return "NHI";
473 default:
474 return "unknown";
475 }
476 case 0x2:
477 return "Ethernet";
478 case 0x8:
479 return "SATA";
480 case 0xe:
481 return "DP/HDMI";
482 case 0x10:
483 return "PCIe";
484 case 0x20:
485 return "USB";
486 default:
487 return "unknown";
488 }
489}
490
491static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
492{
493 tb_dbg(tb,
494 " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
495 port->port_number, port->vendor_id, port->device_id,
496 port->revision, port->thunderbolt_version, tb_port_type(port),
497 port->type);
498 tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
499 port->max_in_hop_id, port->max_out_hop_id);
500 tb_dbg(tb, " Max counters: %d\n", port->max_counters);
501 tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
502}
503
/**
 * tb_port_state() - get connectedness state of a port
 * @port: Port to query
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
511static int tb_port_state(struct tb_port *port)
512{
513 struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
516 tb_port_WARN(port, "does not have a PHY\n");
517 return -EINVAL;
518 }
519 res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
520 if (res)
521 return res;
522 return phy.state;
523}
524
/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait for
 * @wait_if_unplugged: Wait also when the port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * @wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
538int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
539{
540 int retries = 10;
	int state;

	if (!port->cap_phy) {
543 tb_port_WARN(port, "does not have PHY\n");
544 return -EINVAL;
545 }
546 if (tb_is_upstream_port(port)) {
547 tb_port_WARN(port, "is the upstream port\n");
548 return -EINVAL;
549 }
550
551 while (retries--) {
552 state = tb_port_state(port);
553 if (state < 0)
554 return state;
555 if (state == TB_PORT_DISABLED) {
556 tb_port_dbg(port, "is disabled (state: 0)\n");
557 return 0;
558 }
559 if (state == TB_PORT_UNPLUGGED) {
560 if (wait_if_unplugged) {
561 /* used during resume */
562 tb_port_dbg(port,
563 "is unplugged (state: 7), retrying...\n");
564 msleep(100);
565 continue;
566 }
567 tb_port_dbg(port, "is unplugged (state: 7)\n");
568 return 0;
569 }
570 if (state == TB_PORT_UP) {
571 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
572 return 1;
573 }
574
575 /*
576 * After plug-in the state is TB_PORT_CONNECTING. Give it some
577 * time.
578 */
579 tb_port_dbg(port,
580 "is connected, link is not up (state: %d), retrying...\n",
581 state);
582 msleep(100);
583 }
584 tb_port_warn(port,
585 "failed to reach state TB_PORT_UP. Ignoring port...\n");
586 return 0;
587}
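
/*
 * Illustrative use (a sketch, not driver code), e.g. in a loop over
 * ports during resume:
 *
 *	ret = tb_wait_for_port(port, true);
 *	if (ret < 0)
 *		return ret;	(reading the port state failed)
 *	if (!ret)
 *		continue;	(nothing usable is connected)
 *	(ret == 1: the port is connected and the link is up)
 */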
588
/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port whose NFC credits to change
 * @credits: Credits to add (or remove when negative)
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
597int tb_port_add_nfc_credits(struct tb_port *port, int credits)
598{
599 u32 nfc_credits;
600
601 if (credits == 0 || port->sw->is_unplugged)
602 return 0;
603
604 nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
605 nfc_credits += credits;
606
	tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
609
610 port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
611 port->config.nfc_credits |= nfc_credits;
612
613 return tb_port_write(port, &port->config.nfc_credits,
614 TB_CFG_PORT, ADP_CS_4, 1);
615}
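
/*
 * Example (sketch): a path deactivation returns the credits it added
 * during activation by passing the same amount negated:
 *
 *	tb_port_add_nfc_credits(port, -credits);
 */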
616
/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
624int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
625{
626 u32 data;
627 int ret;
628
629 ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
630 if (ret)
631 return ret;
632
633 data &= ~ADP_CS_5_LCA_MASK;
634 data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
635
636 return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
637}
638
/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counter to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
644int tb_port_clear_counter(struct tb_port *port, int counter)
645{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
648 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
649}
650
651/**
652 * tb_port_unlock() - Unlock downstream port
653 * @port: Port to unlock
654 *
655 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
656 * downstream router accessible for CM.
657 */
658int tb_port_unlock(struct tb_port *port)
659{
660 if (tb_switch_is_icm(port->sw))
661 return 0;
662 if (!tb_port_is_null(port))
663 return -EINVAL;
664 if (tb_switch_is_usb4(port->sw))
665 return usb4_port_unlock(port);
666 return 0;
667}
668
/**
 * tb_init_port() - initialize a port
 * @port: Port to initialize
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
677static int tb_init_port(struct tb_port *port)
678{
679 int res;
680 int cap;
681
682 res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
683 if (res) {
684 if (res == -ENODEV) {
685 tb_dbg(port->sw->tb, " Port %d: not implemented\n",
686 port->port);
687 port->disabled = true;
688 return 0;
689 }
690 return res;
691 }
692
693 /* Port 0 is the switch itself and has no PHY. */
694 if (port->config.type == TB_TYPE_PORT && port->port != 0) {
695 cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
696
697 if (cap > 0)
698 port->cap_phy = cap;
699 else
700 tb_port_WARN(port, "non switch port without a PHY\n");
701
702 cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
703 if (cap > 0)
704 port->cap_usb4 = cap;
705 } else if (port->port != 0) {
706 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
707 if (cap > 0)
708 port->cap_adap = cap;
709 }
710
711 tb_dump_port(port->sw->tb, &port->config);
712
713 /* Control port does not need HopID allocation */
714 if (port->port) {
715 ida_init(&port->in_hopids);
716 ida_init(&port->out_hopids);
717 }
718
719 INIT_LIST_HEAD(&port->list);
	return 0;
}
723
724static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
725 int max_hopid)
726{
727 int port_max_hopid;
728 struct ida *ida;
729
730 if (in) {
731 port_max_hopid = port->config.max_in_hop_id;
732 ida = &port->in_hopids;
733 } else {
734 port_max_hopid = port->config.max_out_hop_id;
735 ida = &port->out_hopids;
736 }
737
738 /*
	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
	 * reserved.
741 */
742 if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID)
743 min_hopid = TB_PATH_MIN_HOPID;
744
745 if (max_hopid < 0 || max_hopid > port_max_hopid)
746 max_hopid = port_max_hopid;
747
748 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
749}
750
751/**
752 * tb_port_alloc_in_hopid() - Allocate input HopID from port
753 * @port: Port to allocate HopID for
754 * @min_hopid: Minimum acceptable input HopID
755 * @max_hopid: Maximum acceptable input HopID
756 *
757 * Return: HopID between @min_hopid and @max_hopid or negative errno in
758 * case of error.
759 */
760int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
761{
762 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
763}
764
765/**
766 * tb_port_alloc_out_hopid() - Allocate output HopID from port
767 * @port: Port to allocate HopID for
768 * @min_hopid: Minimum acceptable output HopID
769 * @max_hopid: Maximum acceptable output HopID
770 *
771 * Return: HopID between @min_hopid and @max_hopid or negative errno in
772 * case of error.
773 */
774int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
775{
776 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
777}
778
779/**
780 * tb_port_release_in_hopid() - Release allocated input HopID from port
781 * @port: Port whose HopID to release
782 * @hopid: HopID to release
783 */
784void tb_port_release_in_hopid(struct tb_port *port, int hopid)
785{
786 ida_simple_remove(&port->in_hopids, hopid);
787}
788
789/**
790 * tb_port_release_out_hopid() - Release allocated output HopID from port
791 * @port: Port whose HopID to release
792 * @hopid: HopID to release
793 */
794void tb_port_release_out_hopid(struct tb_port *port, int hopid)
795{
796 ida_simple_remove(&port->out_hopids, hopid);
797}
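
/*
 * Example (sketch): path setup pairs the allocation and release calls.
 * Passing a negative @max_hopid uses the port maximum:
 *
 *	int hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
 *
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */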
798
799static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
800 const struct tb_switch *sw)
801{
	u64 mask = (1ULL << (parent->config.depth * 8)) - 1;

	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
804}
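
/*
 * Example: a parent at depth 1 with route 0x1 masks the low 8 bits, so
 * a switch with route 0x301 (one hop further down) compares equal
 * (0x1 == (0x301 & 0xff)) and is considered reachable from the parent.
 */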
805
806/**
807 * tb_next_port_on_path() - Return next port for given port on a path
808 * @start: Start port of the walk
809 * @end: End port of the walk
810 * @prev: Previous port (%NULL if this is the first)
811 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other end
 * of that same link.
816 *
817 * If the @end port has been reached, return %NULL.
818 *
819 * Domain tb->lock must be held when this function is called.
820 */
821struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
822 struct tb_port *prev)
823{
824 struct tb_port *next;
825
826 if (!prev)
827 return start;
828
829 if (prev->sw == end->sw) {
830 if (prev == end)
831 return NULL;
832 return end;
833 }
834
835 if (tb_switch_is_reachable(prev->sw, end->sw)) {
836 next = tb_port_at(tb_route(end->sw), prev->sw);
837 /* Walk down the topology if next == prev */
838 if (prev->remote &&
839 (next == prev || next->dual_link_port == prev))
840 next = prev->remote;
841 } else {
842 if (tb_is_upstream_port(prev)) {
843 next = prev->remote;
844 } else {
845 next = tb_upstream_port(prev->sw);
846 /*
847 * Keep the same link if prev and next are both
848 * dual link ports.
849 */
850 if (next->dual_link_port &&
851 next->link_nr != prev->link_nr) {
852 next = next->dual_link_port;
853 }
854 }
855 }
856
857 return next != prev ? next : NULL;
858}
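
/*
 * A typical walk (sketch only) visits every port from src to dst
 * inclusive, where src and dst are the endpoints of the path:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p))) {
 *		... look at p ...
 *	}
 */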
859
860/**
861 * tb_port_get_link_speed() - Get current link speed
862 * @port: Port to check (USB4 or CIO)
863 *
864 * Returns link speed in Gb/s or negative errno in case of failure.
865 */
866int tb_port_get_link_speed(struct tb_port *port)
867{
868 u32 val, speed;
869 int ret;
870
871 if (!port->cap_phy)
872 return -EINVAL;
873
874 ret = tb_port_read(port, &val, TB_CFG_PORT,
875 port->cap_phy + LANE_ADP_CS_1, 1);
876 if (ret)
877 return ret;
878
879 speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
880 LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
881 return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
882}
883
884static int tb_port_get_link_width(struct tb_port *port)
885{
886 u32 val;
887 int ret;
888
889 if (!port->cap_phy)
890 return -EINVAL;
891
892 ret = tb_port_read(port, &val, TB_CFG_PORT,
893 port->cap_phy + LANE_ADP_CS_1, 1);
894 if (ret)
895 return ret;
896
897 return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
898 LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
899}
900
901static bool tb_port_is_width_supported(struct tb_port *port, int width)
902{
903 u32 phy, widths;
904 int ret;
905
906 if (!port->cap_phy)
907 return false;
908
909 ret = tb_port_read(port, &phy, TB_CFG_PORT,
910 port->cap_phy + LANE_ADP_CS_0, 1);
911 if (ret)
912 return false;
913
914 widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
915 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
916
917 return !!(widths & width);
918}
919
920static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
921{
922 u32 val;
923 int ret;
924
925 if (!port->cap_phy)
926 return -EINVAL;
927
928 ret = tb_port_read(port, &val, TB_CFG_PORT,
929 port->cap_phy + LANE_ADP_CS_1, 1);
930 if (ret)
931 return ret;
932
933 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
934 switch (width) {
935 case 1:
936 val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
937 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
938 break;
939 case 2:
940 val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
941 LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
942 break;
943 default:
944 return -EINVAL;
945 }
946
947 val |= LANE_ADP_CS_1_LB;
948
949 return tb_port_write(port, &val, TB_CFG_PORT,
950 port->cap_phy + LANE_ADP_CS_1, 1);
951}
952
953static int tb_port_lane_bonding_enable(struct tb_port *port)
954{
955 int ret;
956
957 /*
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
960 */
961 ret = tb_port_get_link_width(port);
962 if (ret == 1) {
963 ret = tb_port_set_link_width(port, 2);
964 if (ret)
965 return ret;
966 }
967
968 ret = tb_port_get_link_width(port->dual_link_port);
969 if (ret == 1) {
970 ret = tb_port_set_link_width(port->dual_link_port, 2);
971 if (ret) {
972 tb_port_set_link_width(port, 1);
973 return ret;
974 }
975 }
976
977 port->bonded = true;
978 port->dual_link_port->bonded = true;
979
980 return 0;
981}
982
983static void tb_port_lane_bonding_disable(struct tb_port *port)
984{
985 port->dual_link_port->bonded = false;
986 port->bonded = false;
987
988 tb_port_set_link_width(port->dual_link_port, 1);
989 tb_port_set_link_width(port, 1);
990}
991
992/**
993 * tb_port_is_enabled() - Is the adapter port enabled
994 * @port: Port to check
995 */
996bool tb_port_is_enabled(struct tb_port *port)
997{
998 switch (port->config.type) {
999 case TB_TYPE_PCIE_UP:
1000 case TB_TYPE_PCIE_DOWN:
1001 return tb_pci_port_is_enabled(port);
1002
1003 case TB_TYPE_DP_HDMI_IN:
1004 case TB_TYPE_DP_HDMI_OUT:
1005 return tb_dp_port_is_enabled(port);
1006
1007 case TB_TYPE_USB3_UP:
1008 case TB_TYPE_USB3_DOWN:
1009 return tb_usb3_port_is_enabled(port);
1010
1011 default:
1012 return false;
1013 }
1014}
1015
1016/**
1017 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
1018 * @port: USB3 adapter port to check
1019 */
1020bool tb_usb3_port_is_enabled(struct tb_port *port)
1021{
1022 u32 data;
1023
1024 if (tb_port_read(port, &data, TB_CFG_PORT,
1025 port->cap_adap + ADP_USB3_CS_0, 1))
1026 return false;
1027
1028 return !!(data & ADP_USB3_CS_0_PE);
1029}
1030
1031/**
1032 * tb_usb3_port_enable() - Enable USB3 adapter port
1033 * @port: USB3 adapter port to enable
1034 * @enable: Enable/disable the USB3 adapter
1035 */
1036int tb_usb3_port_enable(struct tb_port *port, bool enable)
1037{
1038 u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
1039 : ADP_USB3_CS_0_V;
1040
1041 if (!port->cap_adap)
1042 return -ENXIO;
1043 return tb_port_write(port, &word, TB_CFG_PORT,
1044 port->cap_adap + ADP_USB3_CS_0, 1);
1045}
1046
1047/**
1048 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
1049 * @port: PCIe port to check
1050 */
1051bool tb_pci_port_is_enabled(struct tb_port *port)
1052{
1053 u32 data;
1054
1055 if (tb_port_read(port, &data, TB_CFG_PORT,
1056 port->cap_adap + ADP_PCIE_CS_0, 1))
1057 return false;
1058
1059 return !!(data & ADP_PCIE_CS_0_PE);
1060}
1061
1062/**
1063 * tb_pci_port_enable() - Enable PCIe adapter port
1064 * @port: PCIe port to enable
1065 * @enable: Enable/disable the PCIe adapter
1066 */
1067int tb_pci_port_enable(struct tb_port *port, bool enable)
1068{
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

	if (!port->cap_adap)
1071 return -ENXIO;
1072 return tb_port_write(port, &word, TB_CFG_PORT,
1073 port->cap_adap + ADP_PCIE_CS_0, 1);
1074}
1075
1076/**
1077 * tb_dp_port_hpd_is_active() - Is HPD already active
1078 * @port: DP out port to check
1079 *
 * Checks if the DP OUT adapter port has the HDP (hot plug detect) bit
 * already set.
1081 */
1082int tb_dp_port_hpd_is_active(struct tb_port *port)
1083{
1084 u32 data;
1085 int ret;
1086
1087 ret = tb_port_read(port, &data, TB_CFG_PORT,
1088 port->cap_adap + ADP_DP_CS_2, 1);
1089 if (ret)
1090 return ret;
1091
1092 return !!(data & ADP_DP_CS_2_HDP);
1093}
1094
1095/**
1096 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
1097 * @port: Port to clear HPD
1098 *
1099 * If the DP IN port has HDP set, this function can be used to clear it.
1100 */
1101int tb_dp_port_hpd_clear(struct tb_port *port)
1102{
1103 u32 data;
1104 int ret;
1105
1106 ret = tb_port_read(port, &data, TB_CFG_PORT,
1107 port->cap_adap + ADP_DP_CS_3, 1);
1108 if (ret)
1109 return ret;
1110
1111 data |= ADP_DP_CS_3_HDPC;
1112 return tb_port_write(port, &data, TB_CFG_PORT,
1113 port->cap_adap + ADP_DP_CS_3, 1);
1114}
1115
1116/**
1117 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
1118 * @port: DP IN/OUT port to set hops
1119 * @video: Video Hop ID
1120 * @aux_tx: AUX TX Hop ID
1121 * @aux_rx: AUX RX Hop ID
1122 *
1123 * Programs specified Hop IDs for DP IN/OUT port.
1124 */
1125int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
1126 unsigned int aux_tx, unsigned int aux_rx)
1127{
1128 u32 data[2];
1129 int ret;
1130
1131 ret = tb_port_read(port, data, TB_CFG_PORT,
1132 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1133 if (ret)
1134 return ret;
1135
1136 data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1139
1140 data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
1141 ADP_DP_CS_0_VIDEO_HOPID_MASK;
1142 data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
1143 data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
1144 ADP_DP_CS_1_AUX_RX_HOPID_MASK;
1145
1146 return tb_port_write(port, data, TB_CFG_PORT,
1147 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1148}
1149
1150/**
1151 * tb_dp_port_is_enabled() - Is DP adapter port enabled
1152 * @port: DP adapter port to check
1153 */
1154bool tb_dp_port_is_enabled(struct tb_port *port)
1155{
1156 u32 data[2];
1157
1158 if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
1159 ARRAY_SIZE(data)))
1160 return false;
1161
1162 return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
1163}
1164
1165/**
1166 * tb_dp_port_enable() - Enables/disables DP paths of a port
1167 * @port: DP IN/OUT port
1168 * @enable: Enable/disable DP path
1169 *
1170 * Once Hop IDs are programmed DP paths can be enabled or disabled by
1171 * calling this function.
1172 */
1173int tb_dp_port_enable(struct tb_port *port, bool enable)
1174{
1175 u32 data[2];
1176 int ret;
1177
1178 ret = tb_port_read(port, data, TB_CFG_PORT,
1179 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1180 if (ret)
1181 return ret;
1182
1183 if (enable)
1184 data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
1185 else
1186 data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
1187
1188 return tb_port_write(port, data, TB_CFG_PORT,
1189 port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
1190}
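
/*
 * Example (sketch): DP tunnel activation programs the HopIDs first and
 * only then enables the video/AUX paths:
 *
 *	ret = tb_dp_port_set_hops(port, video, aux_tx, aux_rx);
 *	if (!ret)
 *		ret = tb_dp_port_enable(port, true);
 */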
1191
1192/* switch utility functions */
1193
1194static const char *tb_switch_generation_name(const struct tb_switch *sw)
1195{
1196 switch (sw->generation) {
1197 case 1:
1198 return "Thunderbolt 1";
1199 case 2:
1200 return "Thunderbolt 2";
1201 case 3:
1202 return "Thunderbolt 3";
1203 case 4:
1204 return "USB4";
1205 default:
1206 return "Unknown";
1207 }
1208}
1209
1210static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
1211{
1212 const struct tb_regs_switch_header *regs = &sw->config;
1213
1214 tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
1215 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
1216 regs->revision, regs->thunderbolt_version);
1217 tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
1218 tb_dbg(tb, " Config:\n");
1219 tb_dbg(tb,
1220 " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
1221 regs->upstream_port_number, regs->depth,
1222 (((u64) regs->route_hi) << 32) | regs->route_lo,
1223 regs->enabled, regs->plug_events_delay);
1224 tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
1225 regs->__unknown1, regs->__unknown4);
1226}
1227
/**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @tb: Pointer to the domain
 * @route: Route string of the switch to reset
 *
 * Return: Returns 0 on success or an error code on failure.
 */
1233int tb_switch_reset(struct tb *tb, u64 route)
1234{
1235 struct tb_cfg_result res;
1236 struct tb_regs_switch_header header = {
		.route_hi = route >> 32,
		.route_lo = route,
		.enabled = true,
	};

	tb_dbg(tb, "resetting switch at %llx\n", route);
1242 res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
1243 0, 2, 2, 2);
1244 if (res.err)
1245 return res.err;
1246 res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
1247 if (res.err > 0)
1248 return -EIO;
1249 return res.err;
1250}
1251
/**
 * tb_plug_events_active() - enable/disable plug events on a switch
 * @sw: Switch whose plug events to configure
 * @active: Enable or disable the plug events
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
1259static int tb_plug_events_active(struct tb_switch *sw, bool active)
1260{
1261 u32 data;
1262 int res;
1263
1264 if (tb_switch_is_icm(sw))
1265 return 0;
1266
1267 sw->config.plug_events_delay = 0xff;
1268 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
1269 if (res)
1270 return res;
1271
1272 /* Plug events are always enabled in USB4 */
1273 if (tb_switch_is_usb4(sw))
1274 return 0;
1275
1276 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
1277 if (res)
1278 return res;
1279
1280 if (active) {
1281 data = data & 0xFFFFFF83;
1282 switch (sw->config.device_id) {
1283 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1284 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1285 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1286 break;
1287 default:
1288 data |= 4;
1289 }
1290 } else {
1291 data = data | 0x7c;
1292 }
1293 return tb_sw_write(sw, &data, TB_CFG_SWITCH,
1294 sw->cap_plug_events + 1, 1);
1295}
1296
1297static ssize_t authorized_show(struct device *dev,
1298 struct device_attribute *attr,
1299 char *buf)
1300{
1301 struct tb_switch *sw = tb_to_switch(dev);
1302
1303 return sprintf(buf, "%u\n", sw->authorized);
1304}
1305
1306static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
1307{
1308 int ret = -EINVAL;
1309
1310 if (!mutex_trylock(&sw->tb->lock))
1311 return restart_syscall();
1312
1313 if (sw->authorized)
1314 goto unlock;
1315
1316 switch (val) {
1317 /* Approve switch */
1318 case 1:
1319 if (sw->key)
1320 ret = tb_domain_approve_switch_key(sw->tb, sw);
1321 else
1322 ret = tb_domain_approve_switch(sw->tb, sw);
1323 break;
1324
1325 /* Challenge switch */
1326 case 2:
1327 if (sw->key)
1328 ret = tb_domain_challenge_switch_key(sw->tb, sw);
1329 break;
1330
1331 default:
1332 break;
1333 }
1334
1335 if (!ret) {
1336 sw->authorized = val;
1337 /* Notify status change to the userspace */
1338 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
1339 }
1340
1341unlock:
1342 mutex_unlock(&sw->tb->lock);
1343 return ret;
1344}
1345
1346static ssize_t authorized_store(struct device *dev,
1347 struct device_attribute *attr,
1348 const char *buf, size_t count)
1349{
1350 struct tb_switch *sw = tb_to_switch(dev);
1351 unsigned int val;
1352 ssize_t ret;
1353
1354 ret = kstrtouint(buf, 0, &val);
1355 if (ret)
1356 return ret;
1357 if (val > 2)
1358 return -EINVAL;
1359
1360 pm_runtime_get_sync(&sw->dev);
1361 ret = tb_switch_set_authorized(sw, val);
1362 pm_runtime_mark_last_busy(&sw->dev);
1363 pm_runtime_put_autosuspend(&sw->dev);
1364
1365 return ret ? ret : count;
1366}
1367static DEVICE_ATTR_RW(authorized);
1368
1369static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
1370 char *buf)
1371{
1372 struct tb_switch *sw = tb_to_switch(dev);
1373
1374 return sprintf(buf, "%u\n", sw->boot);
1375}
1376static DEVICE_ATTR_RO(boot);
1377
1378static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1379 char *buf)
1380{
1381 struct tb_switch *sw = tb_to_switch(dev);
1382
1383 return sprintf(buf, "%#x\n", sw->device);
1384}
1385static DEVICE_ATTR_RO(device);
1386
1387static ssize_t
1388device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1389{
1390 struct tb_switch *sw = tb_to_switch(dev);
1391
1392 return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
1393}
1394static DEVICE_ATTR_RO(device_name);
1395
1396static ssize_t
1397generation_show(struct device *dev, struct device_attribute *attr, char *buf)
1398{
1399 struct tb_switch *sw = tb_to_switch(dev);
1400
1401 return sprintf(buf, "%u\n", sw->generation);
1402}
1403static DEVICE_ATTR_RO(generation);
1404
1405static ssize_t key_show(struct device *dev, struct device_attribute *attr,
1406 char *buf)
1407{
1408 struct tb_switch *sw = tb_to_switch(dev);
1409 ssize_t ret;
1410
1411 if (!mutex_trylock(&sw->tb->lock))
1412 return restart_syscall();
1413
1414 if (sw->key)
1415 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
1416 else
1417 ret = sprintf(buf, "\n");
1418
1419 mutex_unlock(&sw->tb->lock);
1420 return ret;
1421}
1422
1423static ssize_t key_store(struct device *dev, struct device_attribute *attr,
1424 const char *buf, size_t count)
1425{
1426 struct tb_switch *sw = tb_to_switch(dev);
1427 u8 key[TB_SWITCH_KEY_SIZE];
1428 ssize_t ret = count;
1429 bool clear = false;
1430
1431 if (!strcmp(buf, "\n"))
1432 clear = true;
1433 else if (hex2bin(key, buf, sizeof(key)))
1434 return -EINVAL;
1435
1436 if (!mutex_trylock(&sw->tb->lock))
1437 return restart_syscall();
1438
1439 if (sw->authorized) {
1440 ret = -EBUSY;
1441 } else {
1442 kfree(sw->key);
1443 if (clear) {
1444 sw->key = NULL;
1445 } else {
1446 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
1447 if (!sw->key)
1448 ret = -ENOMEM;
1449 }
1450 }
1451
1452 mutex_unlock(&sw->tb->lock);
1453 return ret;
1454}
1455static DEVICE_ATTR(key, 0600, key_show, key_store);
1456
1457static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1458 char *buf)
1459{
1460 struct tb_switch *sw = tb_to_switch(dev);
1461
1462 return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
1463}
1464
1465/*
1466 * Currently all lanes must run at the same speed but we expose here
1467 * both directions to allow possible asymmetric links in the future.
1468 */
1469static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1470static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1471
1472static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1473 char *buf)
1474{
1475 struct tb_switch *sw = tb_to_switch(dev);
1476
1477 return sprintf(buf, "%u\n", sw->link_width);
1478}
1479
1480/*
1481 * Currently link has same amount of lanes both directions (1 or 2) but
1482 * expose them separately to allow possible asymmetric links in the future.
1483 */
1484static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1485static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1486
1487static ssize_t nvm_authenticate_show(struct device *dev,
1488 struct device_attribute *attr, char *buf)
1489{
1490 struct tb_switch *sw = tb_to_switch(dev);
1491 u32 status;
1492
1493 nvm_get_auth_status(sw, &status);
1494 return sprintf(buf, "%#x\n", status);
1495}
1496
1497static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
1498 bool disconnect)
1499{
1500 struct tb_switch *sw = tb_to_switch(dev);
1501 int val;
1502 int ret;
1503
1504 pm_runtime_get_sync(&sw->dev);
1505
1506 if (!mutex_trylock(&sw->tb->lock)) {
1507 ret = restart_syscall();
1508 goto exit_rpm;
1509 }
1510
1511 /* If NVMem devices are not yet added */
1512 if (!sw->nvm) {
1513 ret = -EAGAIN;
1514 goto exit_unlock;
1515 }
1516
1517 ret = kstrtoint(buf, 10, &val);
1518 if (ret)
1519 goto exit_unlock;
1520
1521 /* Always clear the authentication status */
1522 nvm_clear_auth_status(sw);
1523
1524 if (val > 0) {
1525 if (!sw->nvm->flushed) {
1526 if (!sw->nvm->buf) {
1527 ret = -EINVAL;
1528 goto exit_unlock;
1529 }
1530
1531 ret = nvm_validate_and_write(sw);
1532 if (ret || val == WRITE_ONLY)
1533 goto exit_unlock;
1534 }
1535 if (val == WRITE_AND_AUTHENTICATE) {
1536 if (disconnect) {
1537 ret = tb_lc_force_power(sw);
1538 } else {
1539 sw->nvm->authenticating = true;
1540 ret = nvm_authenticate(sw);
1541 }
1542 }
1543 }
1544
1545exit_unlock:
1546 mutex_unlock(&sw->tb->lock);
1547exit_rpm:
1548 pm_runtime_mark_last_busy(&sw->dev);
1549 pm_runtime_put_autosuspend(&sw->dev);
1550
1551 return ret;
1552}
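
/*
 * Summary of the accepted values (assuming WRITE_ONLY and
 * WRITE_AND_AUTHENTICATE match the checks above): any positive value
 * flushes the cached image to the NVM; WRITE_AND_AUTHENTICATE also
 * starts authentication, either immediately or at disconnect time
 * depending on which sysfs attribute was written.
 */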
1553
1554static ssize_t nvm_authenticate_store(struct device *dev,
1555 struct device_attribute *attr, const char *buf, size_t count)
1556{
	int ret = nvm_authenticate_sysfs(dev, buf, false);

	if (ret)
1559 return ret;
1560 return count;
1561}
1562static DEVICE_ATTR_RW(nvm_authenticate);
1563
1564static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
1565 struct device_attribute *attr, char *buf)
1566{
1567 return nvm_authenticate_show(dev, attr, buf);
1568}
1569
1570static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
1571 struct device_attribute *attr, const char *buf, size_t count)
1572{
1573 int ret;
1574
1575 ret = nvm_authenticate_sysfs(dev, buf, true);
1576 return ret ? ret : count;
1577}
1578static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
1579
1580static ssize_t nvm_version_show(struct device *dev,
1581 struct device_attribute *attr, char *buf)
1582{
1583 struct tb_switch *sw = tb_to_switch(dev);
1584 int ret;
1585
1586 if (!mutex_trylock(&sw->tb->lock))
1587 return restart_syscall();
1588
1589 if (sw->safe_mode)
1590 ret = -ENODATA;
1591 else if (!sw->nvm)
1592 ret = -EAGAIN;
1593 else
1594 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
1595
1596 mutex_unlock(&sw->tb->lock);
1597
1598 return ret;
1599}
1600static DEVICE_ATTR_RO(nvm_version);
1601
1602static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1603 char *buf)
1604{
1605 struct tb_switch *sw = tb_to_switch(dev);
1606
1607 return sprintf(buf, "%#x\n", sw->vendor);
1608}
1609static DEVICE_ATTR_RO(vendor);
1610
1611static ssize_t
1612vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1613{
1614 struct tb_switch *sw = tb_to_switch(dev);
1615
1616 return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
1617}
1618static DEVICE_ATTR_RO(vendor_name);
1619
1620static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1621 char *buf)
1622{
1623 struct tb_switch *sw = tb_to_switch(dev);
1624
1625 return sprintf(buf, "%pUb\n", sw->uuid);
1626}
1627static DEVICE_ATTR_RO(unique_id);
1628
1629static struct attribute *switch_attrs[] = {
1630 &dev_attr_authorized.attr,
1631 &dev_attr_boot.attr,
1632 &dev_attr_device.attr,
1633 &dev_attr_device_name.attr,
1634 &dev_attr_generation.attr,
1635 &dev_attr_key.attr,
1636 &dev_attr_nvm_authenticate.attr,
1637 &dev_attr_nvm_authenticate_on_disconnect.attr,
1638 &dev_attr_nvm_version.attr,
1639 &dev_attr_rx_speed.attr,
1640 &dev_attr_rx_lanes.attr,
1641 &dev_attr_tx_speed.attr,
1642 &dev_attr_tx_lanes.attr,
1643 &dev_attr_vendor.attr,
1644 &dev_attr_vendor_name.attr,
1645 &dev_attr_unique_id.attr,
1646 NULL,
1647};
1648
1649static umode_t switch_attr_is_visible(struct kobject *kobj,
1650 struct attribute *attr, int n)
1651{
1652 struct device *dev = container_of(kobj, struct device, kobj);
1653 struct tb_switch *sw = tb_to_switch(dev);
1654
1655 if (attr == &dev_attr_device.attr) {
1656 if (!sw->device)
1657 return 0;
1658 } else if (attr == &dev_attr_device_name.attr) {
1659 if (!sw->device_name)
1660 return 0;
1661 } else if (attr == &dev_attr_vendor.attr) {
1662 if (!sw->vendor)
1663 return 0;
1664 } else if (attr == &dev_attr_vendor_name.attr) {
1665 if (!sw->vendor_name)
1666 return 0;
1667 } else if (attr == &dev_attr_key.attr) {
1668 if (tb_route(sw) &&
1669 sw->tb->security_level == TB_SECURITY_SECURE &&
1670 sw->security_level == TB_SECURITY_SECURE)
1671 return attr->mode;
1672 return 0;
1673 } else if (attr == &dev_attr_rx_speed.attr ||
1674 attr == &dev_attr_rx_lanes.attr ||
1675 attr == &dev_attr_tx_speed.attr ||
1676 attr == &dev_attr_tx_lanes.attr) {
1677 if (tb_route(sw))
1678 return attr->mode;
1679 return 0;
1680 } else if (attr == &dev_attr_nvm_authenticate.attr) {
1681 if (nvm_upgradeable(sw))
1682 return attr->mode;
1683 return 0;
1684 } else if (attr == &dev_attr_nvm_version.attr) {
1685 if (nvm_readable(sw))
1686 return attr->mode;
1687 return 0;
1688 } else if (attr == &dev_attr_boot.attr) {
1689 if (tb_route(sw))
1690 return attr->mode;
1691 return 0;
1692 } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
1693 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
1694 return attr->mode;
1695 return 0;
1696 }
1697
1698 return sw->safe_mode ? 0 : attr->mode;
1699}
1700
1701static struct attribute_group switch_group = {
1702 .is_visible = switch_attr_is_visible,
1703 .attrs = switch_attrs,
1704};
1705
1706static const struct attribute_group *switch_groups[] = {
1707 &switch_group,
1708 NULL,
1709};
1710
1711static void tb_switch_release(struct device *dev)
1712{
1713 struct tb_switch *sw = tb_to_switch(dev);
1714 struct tb_port *port;
1715
1716 dma_port_free(sw->dma_port);
1717
1718 tb_switch_for_each_port(sw, port) {
1719 if (!port->disabled) {
1720 ida_destroy(&port->in_hopids);
1721 ida_destroy(&port->out_hopids);
1722 }
1723 }
1724
1725 kfree(sw->uuid);
1726 kfree(sw->device_name);
1727 kfree(sw->vendor_name);
1728 kfree(sw->ports);
1729 kfree(sw->drom);
1730 kfree(sw->key);
1731 kfree(sw);
1732}
1733
1734/*
 * Currently we only need to provide the callbacks. Everything else is
 * handled in the connection manager.
1737 */
1738static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
1739{
1740 struct tb_switch *sw = tb_to_switch(dev);
1741 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1742
1743 if (cm_ops->runtime_suspend_switch)
1744 return cm_ops->runtime_suspend_switch(sw);
1745
1746 return 0;
1747}
1748
1749static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
1750{
1751 struct tb_switch *sw = tb_to_switch(dev);
1752 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
1753
1754 if (cm_ops->runtime_resume_switch)
1755 return cm_ops->runtime_resume_switch(sw);
1756 return 0;
1757}
1758
1759static const struct dev_pm_ops tb_switch_pm_ops = {
1760 SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
1761 NULL)
1762};
1763
1764struct device_type tb_switch_type = {
1765 .name = "thunderbolt_device",
1766 .release = tb_switch_release,
1767 .pm = &tb_switch_pm_ops,
1768};
1769
1770static int tb_switch_get_generation(struct tb_switch *sw)
1771{
1772 switch (sw->config.device_id) {
1773 case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
1774 case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
1775 case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
1776 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
1777 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
1778 case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
1779 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
1780 case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
1781 return 1;
1782
1783 case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
1784 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
1785 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
1786 return 2;
1787
1788 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1789 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
1790 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
1791 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1792 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1793 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1794 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1795 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
1796 case PCI_DEVICE_ID_INTEL_ICL_NHI0:
1797 case PCI_DEVICE_ID_INTEL_ICL_NHI1:
1798 return 3;
1799
1800 default:
1801 if (tb_switch_is_usb4(sw))
1802 return 4;
1803
1804 /*
1805 * For unknown switches assume generation to be 1 to be
1806 * on the safe side.
1807 */
1808 tb_sw_warn(sw, "unsupported switch device id %#x\n",
1809 sw->config.device_id);
1810 return 1;
1811 }
1812}
1813
1814static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
1815{
1816 int max_depth;
1817
1818 if (tb_switch_is_usb4(sw) ||
1819 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
1820 max_depth = USB4_SWITCH_MAX_DEPTH;
1821 else
1822 max_depth = TB_SWITCH_MAX_DEPTH;
1823
1824 return depth > max_depth;
1825}
1826
1827/**
1828 * tb_switch_alloc() - allocate a switch
1829 * @tb: Pointer to the owning domain
1830 * @parent: Parent device for this switch
1831 * @route: Route string for this switch
1832 *
1833 * Allocates and initializes a switch. Will not upload configuration to
1834 * the switch. For that you need to call tb_switch_configure()
1835 * separately. The returned switch should be released by calling
1836 * tb_switch_put().
1837 *
1838 * Return: Pointer to the allocated switch or ERR_PTR() in case of
1839 * failure.
1840 */
1841struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1842 u64 route)
1843{
1844 struct tb_switch *sw;
1845 int upstream_port;
1846 int i, ret, depth;
1847
1848 /* Unlock the downstream port so we can access the switch below */
1849 if (route) {
1850 struct tb_switch *parent_sw = tb_to_switch(parent);
1851 struct tb_port *down;
1852
1853 down = tb_port_at(route, parent_sw);
1854 tb_port_unlock(down);
1855 }
1856
1857 depth = tb_route_length(route);
1858
1859 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1860 if (upstream_port < 0)
1861 return ERR_PTR(upstream_port);
1862
1863 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1864 if (!sw)
1865 return ERR_PTR(-ENOMEM);
1866
1867 sw->tb = tb;
1868 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1869 if (ret)
1870 goto err_free_sw_ports;
1871
1872 sw->generation = tb_switch_get_generation(sw);
1873
1874 tb_dbg(tb, "current switch config:\n");
1875 tb_dump_switch(tb, sw);
1876
1877 /* configure switch */
1878 sw->config.upstream_port_number = upstream_port;
1879 sw->config.depth = depth;
1880 sw->config.route_hi = upper_32_bits(route);
1881 sw->config.route_lo = lower_32_bits(route);
1882 sw->config.enabled = 0;
1883
1884 /* Make sure we do not exceed maximum topology limit */
1885 if (tb_switch_exceeds_max_depth(sw, depth)) {
1886 ret = -EADDRNOTAVAIL;
1887 goto err_free_sw_ports;
1888 }
1889
1890 /* initialize ports */
1891 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1892 GFP_KERNEL);
1893 if (!sw->ports) {
1894 ret = -ENOMEM;
1895 goto err_free_sw_ports;
1896 }
1897
1898 for (i = 0; i <= sw->config.max_port_number; i++) {
1899 /* minimum setup for tb_find_cap and tb_drom_read to work */
1900 sw->ports[i].sw = sw;
1901 sw->ports[i].port = i;
1902 }
1903
1904 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1905 if (ret > 0)
1906 sw->cap_plug_events = ret;
1907
1908 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1909 if (ret > 0)
1910 sw->cap_lc = ret;
1911
1912 /* Root switch is always authorized */
1913 if (!route)
1914 sw->authorized = true;
1915
1916 device_initialize(&sw->dev);
1917 sw->dev.parent = parent;
1918 sw->dev.bus = &tb_bus_type;
1919 sw->dev.type = &tb_switch_type;
1920 sw->dev.groups = switch_groups;
1921 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1922
1923 return sw;
1924
1925err_free_sw_ports:
1926 kfree(sw->ports);
1927 kfree(sw);
1928
1929 return ERR_PTR(ret);
1930}
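
/*
 * Typical lifecycle (sketch): the connection manager allocates,
 * configures and adds the switch, dropping the reference with
 * tb_switch_put() on failure:
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);
 *	if (!ret)
 *		ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);
 */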
1931
1932/**
1933 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
1934 * @tb: Pointer to the owning domain
1935 * @parent: Parent device for this switch
1936 * @route: Route string for this switch
1937 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port until it is
 * flashed with valid NVM firmware.
1941 *
1942 * The returned switch must be released by calling tb_switch_put().
1943 *
1944 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
1945 */
1946struct tb_switch *
1947tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1948{
1949 struct tb_switch *sw;
1950
1951 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1952 if (!sw)
1953 return ERR_PTR(-ENOMEM);
1954
1955 sw->tb = tb;
1956 sw->config.depth = tb_route_length(route);
1957 sw->config.route_hi = upper_32_bits(route);
1958 sw->config.route_lo = lower_32_bits(route);
1959 sw->safe_mode = true;
1960
1961 device_initialize(&sw->dev);
1962 sw->dev.parent = parent;
1963 sw->dev.bus = &tb_bus_type;
1964 sw->dev.type = &tb_switch_type;
1965 sw->dev.groups = switch_groups;
1966 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
1967
1968 return sw;
1969}
1970
1971/**
1972 * tb_switch_configure() - Uploads configuration to the switch
1973 * @sw: Switch to configure
1974 *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. It can be called again after resume from
 * low power states to re-initialize the switch.
1979 *
1980 * Return: %0 in case of success and negative errno in case of failure
1981 */
1982int tb_switch_configure(struct tb_switch *sw)
1983{
1984 struct tb *tb = sw->tb;
1985 u64 route;
1986 int ret;
1987
1988 route = tb_route(sw);
1989
1990 tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
1991 sw->config.enabled ? "restoring " : "initializing", route,
1992 tb_route_length(route), sw->config.upstream_port_number);
1993
1994 sw->config.enabled = 1;
1995
1996 if (tb_switch_is_usb4(sw)) {
1997 /*
1998 * For USB4 devices, we need to program the CM version
1999 * accordingly so that it knows to expose all the
2000 * additional capabilities.
2001 */
2002 sw->config.cmuv = USB4_VERSION_1_0;
2003
2004 /* Enumerate the switch */
2005 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2006 ROUTER_CS_1, 4);
2007 if (ret)
2008 return ret;
2009
2010 ret = usb4_switch_setup(sw);
2011 if (ret)
2012 return ret;
2013
2014 ret = usb4_switch_configure_link(sw);
2015 } else {
2016 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
2017 tb_sw_warn(sw, "unknown switch vendor id %#x\n",
2018 sw->config.vendor_id);
2019
2020 if (!sw->cap_plug_events) {
2021 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
2022 return -ENODEV;
2023 }
2024
2025 /* Enumerate the switch */
2026 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
2027 ROUTER_CS_1, 3);
2028 if (ret)
2029 return ret;
2030
2031 ret = tb_lc_configure_link(sw);
2032 }
2033 if (ret)
2034 return ret;
2035
2036 return tb_plug_events_active(sw, true);
2037}
2038
2039static int tb_switch_set_uuid(struct tb_switch *sw)
2040{
2041 bool uid = false;
2042 u32 uuid[4];
2043 int ret;
2044
2045 if (sw->uuid)
2046 return 0;
2047
2048 if (tb_switch_is_usb4(sw)) {
2049 ret = usb4_switch_read_uid(sw, &sw->uid);
2050 if (ret)
2051 return ret;
2052 uid = true;
2053 } else {
2054 /*
		 * The newer controllers include a fused UUID as part of
		 * the link controller specific registers.
2057 */
2058 ret = tb_lc_read_uuid(sw, uuid);
2059 if (ret) {
2060 if (ret != -EINVAL)
2061 return ret;
2062 uid = true;
2063 }
2064 }
2065
2066 if (uid) {
2067 /*
2068 * ICM generates UUID based on UID and fills the upper
2069 * two words with ones. This is not strictly following
2070 * UUID format but we want to be compatible with it so
2071 * we do the same here.
2072 */
2073 uuid[0] = sw->uid & 0xffffffff;
2074 uuid[1] = (sw->uid >> 32) & 0xffffffff;
2075 uuid[2] = 0xffffffff;
2076 uuid[3] = 0xffffffff;
2077 }
2078
2079 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
2080 if (!sw->uuid)
2081 return -ENOMEM;
2082 return 0;
2083}
2084
2085static int tb_switch_add_dma_port(struct tb_switch *sw)
2086{
2087 u32 status;
2088 int ret;
2089
2090 switch (sw->generation) {
2091 case 2:
2092 /* Only root switch can be upgraded */
2093 if (tb_route(sw))
2094 return 0;
2095
2096 fallthrough;
2097 case 3:
2098 ret = tb_switch_set_uuid(sw);
2099 if (ret)
2100 return ret;
2101 break;
2102
2103 default:
2104 /*
2105 * DMA port is the only thing available when the switch
2106 * is in safe mode.
2107 */
2108 if (!sw->safe_mode)
2109 return 0;
2110 break;
2111 }
2112
2113 /* Root switch DMA port requires running firmware */
2114 if (!tb_route(sw) && !tb_switch_is_icm(sw))
2115 return 0;
2116
2117 sw->dma_port = dma_port_alloc(sw);
2118 if (!sw->dma_port)
2119 return 0;
2120
2121 if (sw->no_nvm_upgrade)
2122 return 0;
2123
2124 /*
	 * If there is a status already set then authentication failed
	 * when dma_port_flash_update_auth() returned. Power cycling
	 * is not needed (it was done already) so the only thing we do
	 * here is to unblock runtime PM of the root port.
2129 */
2130 nvm_get_auth_status(sw, &status);
2131 if (status) {
2132 if (!tb_route(sw))
2133 nvm_authenticate_complete_dma_port(sw);
2134 return 0;
2135 }
2136
2137 /*
2138 * Check status of the previous flash authentication. If there
2139 * is one we need to power cycle the switch in any case to make
2140 * it functional again.
2141 */
2142 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2143 if (ret <= 0)
2144 return ret;
2145
2146 /* Now we can allow root port to suspend again */
2147 if (!tb_route(sw))
2148 nvm_authenticate_complete_dma_port(sw);
2149
2150 if (status) {
2151 tb_sw_info(sw, "switch flash authentication failed\n");
2152 nvm_set_auth_status(sw, status);
2153 }
2154
2155 tb_sw_info(sw, "power cycling the switch now\n");
2156 dma_port_power_cycle(sw->dma_port);
2157
2158 /*
	 * We return an error here which causes adding the switch to fail.
	 * It should appear back after the power cycle is complete.
2161 */
2162 return -ESHUTDOWN;
2163}
2164
2165static void tb_switch_default_link_ports(struct tb_switch *sw)
2166{
2167 int i;
2168
2169 for (i = 1; i <= sw->config.max_port_number; i += 2) {
2170 struct tb_port *port = &sw->ports[i];
2171 struct tb_port *subordinate;
2172
2173 if (!tb_port_is_null(port))
2174 continue;
2175
2176 /* Check for the subordinate port */
2177 if (i == sw->config.max_port_number ||
2178 !tb_port_is_null(&sw->ports[i + 1]))
2179 continue;
2180
		/* Link them if not already done (by the DROM) */
2182 subordinate = &sw->ports[i + 1];
2183 if (!port->dual_link_port && !subordinate->dual_link_port) {
2184 port->link_nr = 0;
2185 port->dual_link_port = subordinate;
2186 subordinate->link_nr = 1;
2187 subordinate->dual_link_port = port;
2188
2189 tb_sw_dbg(sw, "linked ports %d <-> %d\n",
2190 port->port, subordinate->port);
2191 }
2192 }
2193}
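
/*
 * Example: on a router whose adapters 1-4 are all lane adapters the
 * loop above pairs 1 <-> 2 and 3 <-> 4, i.e. it assumes a lane 0
 * adapter is immediately followed by its lane 1 partner unless the
 * DROM already said otherwise.
 */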
2194
2195static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
2196{
2197 const struct tb_port *up = tb_upstream_port(sw);
2198
2199 if (!up->dual_link_port || !up->dual_link_port->remote)
2200 return false;
2201
2202 if (tb_switch_is_usb4(sw))
2203 return usb4_switch_lane_bonding_possible(sw);
2204 return tb_lc_lane_bonding_possible(sw);
2205}
2206
2207static int tb_switch_update_link_attributes(struct tb_switch *sw)
2208{
2209 struct tb_port *up;
2210 bool change = false;
2211 int ret;
2212
2213 if (!tb_route(sw) || tb_switch_is_icm(sw))
2214 return 0;
2215
2216 up = tb_upstream_port(sw);
2217
2218 ret = tb_port_get_link_speed(up);
2219 if (ret < 0)
2220 return ret;
2221 if (sw->link_speed != ret)
2222 change = true;
2223 sw->link_speed = ret;
2224
2225 ret = tb_port_get_link_width(up);
2226 if (ret < 0)
2227 return ret;
2228 if (sw->link_width != ret)
2229 change = true;
2230 sw->link_width = ret;
2231
2232 /* Notify userspace that there is possible link attribute change */
2233 if (device_is_registered(&sw->dev) && change)
2234 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
2235
2236 return 0;
2237}
2238
2239/**
2240 * tb_switch_lane_bonding_enable() - Enable lane bonding
2241 * @sw: Switch to enable lane bonding
2242 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
2246 */
2247int tb_switch_lane_bonding_enable(struct tb_switch *sw)
2248{
2249 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2250 struct tb_port *up, *down;
2251 u64 route = tb_route(sw);
2252 int ret;
2253
2254 if (!route)
2255 return 0;
2256
2257 if (!tb_switch_lane_bonding_possible(sw))
2258 return 0;
2259
2260 up = tb_upstream_port(sw);
2261 down = tb_port_at(route, parent);
2262
2263 if (!tb_port_is_width_supported(up, 2) ||
2264 !tb_port_is_width_supported(down, 2))
2265 return 0;
2266
2267 ret = tb_port_lane_bonding_enable(up);
2268 if (ret) {
2269 tb_port_warn(up, "failed to enable lane bonding\n");
2270 return ret;
2271 }
2272
2273 ret = tb_port_lane_bonding_enable(down);
2274 if (ret) {
2275 tb_port_warn(down, "failed to enable lane bonding\n");
2276 tb_port_lane_bonding_disable(up);
2277 return ret;
2278 }
2279
2280 tb_switch_update_link_attributes(sw);
2281
2282 tb_sw_dbg(sw, "lane bonding enabled\n");
2283 return ret;
2284}
2285
2286/**
2287 * tb_switch_lane_bonding_disable() - Disable lane bonding
2288 * @sw: Switch whose lane bonding to disable
2289 *
2290 * Disables lane bonding between @sw and parent. This can be called even
2291 * if lanes were not bonded originally.
2292 */
2293void tb_switch_lane_bonding_disable(struct tb_switch *sw)
2294{
2295 struct tb_switch *parent = tb_to_switch(sw->dev.parent);
2296 struct tb_port *up, *down;
2297
2298 if (!tb_route(sw))
2299 return;
2300
2301 up = tb_upstream_port(sw);
2302 if (!up->bonded)
2303 return;
2304
2305 down = tb_port_at(tb_route(sw), parent);
2306
2307 tb_port_lane_bonding_disable(up);
2308 tb_port_lane_bonding_disable(down);
2309
2310 tb_switch_update_link_attributes(sw);
2311 tb_sw_dbg(sw, "lane bonding disabled\n");
2312}
2313
2314/**
2315 * tb_switch_add() - Add a switch to the domain
2316 * @sw: Switch to add
2317 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from the DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
2321 * exposed to the userspace when this function successfully returns. To
2322 * remove and release the switch, call tb_switch_remove().
2323 *
2324 * Return: %0 in case of success and negative errno in case of failure
2325 */
2326int tb_switch_add(struct tb_switch *sw)
2327{
2328 int i, ret;
2329
2330 /*
	 * Initialize the DMA control port now before we read the DROM.
	 * Recent host controllers have a more complete DROM in NVM that
	 * includes vendor and model identification strings which we then
	 * expose to the userspace. The NVM can be accessed through the
	 * DMA configuration based mailbox.
2336 */
2337 ret = tb_switch_add_dma_port(sw);
2338 if (ret) {
2339 dev_err(&sw->dev, "failed to add DMA port\n");
2340 return ret;
2341 }
2342
2343 if (!sw->safe_mode) {
2344 /* read drom */
2345 ret = tb_drom_read(sw);
2346 if (ret) {
2347 dev_err(&sw->dev, "reading DROM failed\n");
2348 return ret;
2349 }
2350 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
2351
2352 ret = tb_switch_set_uuid(sw);
2353 if (ret) {
2354 dev_err(&sw->dev, "failed to set UUID\n");
2355 return ret;
2356 }
2357
2358 for (i = 0; i <= sw->config.max_port_number; i++) {
2359 if (sw->ports[i].disabled) {
2360 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
2361 continue;
2362 }
2363 ret = tb_init_port(&sw->ports[i]);
2364 if (ret) {
2365 dev_err(&sw->dev, "failed to initialize port %d\n", i);
2366 return ret;
2367 }
2368 }
2369
2370 tb_switch_default_link_ports(sw);
2371
2372 ret = tb_switch_update_link_attributes(sw);
2373 if (ret)
2374 return ret;
2375
2376 ret = tb_switch_tmu_init(sw);
2377 if (ret)
2378 return ret;
2379 }
2380
2381 ret = device_add(&sw->dev);
2382 if (ret) {
2383 dev_err(&sw->dev, "failed to add device: %d\n", ret);
2384 return ret;
2385 }
2386
2387 if (tb_route(sw)) {
2388 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
2389 sw->vendor, sw->device);
2390 if (sw->vendor_name && sw->device_name)
2391 dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
2392 sw->device_name);
2393 }
2394
2395 ret = tb_switch_nvm_add(sw);
2396 if (ret) {
2397 dev_err(&sw->dev, "failed to add NVM devices\n");
2398 device_del(&sw->dev);
2399 return ret;
2400 }
2401
2402 pm_runtime_set_active(&sw->dev);
2403 if (sw->rpm) {
2404 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
2405 pm_runtime_use_autosuspend(&sw->dev);
2406 pm_runtime_mark_last_busy(&sw->dev);
2407 pm_runtime_enable(&sw->dev);
2408 pm_request_autosuspend(&sw->dev);
2409 }
2410
2411 return 0;
2412}
2413
2414/**
2415 * tb_switch_remove() - Remove and release a switch
2416 * @sw: Switch to remove
2417 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
2421 */
2422void tb_switch_remove(struct tb_switch *sw)
2423{
2424 struct tb_port *port;
2425
2426 if (sw->rpm) {
2427 pm_runtime_get_sync(&sw->dev);
2428 pm_runtime_disable(&sw->dev);
2429 }
2430
2431 /* port 0 is the switch itself and never has a remote */
2432 tb_switch_for_each_port(sw, port) {
2433 if (tb_port_has_remote(port)) {
2434 tb_switch_remove(port->remote->sw);
2435 port->remote = NULL;
2436 } else if (port->xdomain) {
2437 tb_xdomain_remove(port->xdomain);
2438 port->xdomain = NULL;
2439 }
2440
2441 /* Remove any downstream retimers */
2442 tb_retimer_remove_all(port);
2443 }
2444
2445 if (!sw->is_unplugged)
2446 tb_plug_events_active(sw, false);
2447
2448 if (tb_switch_is_usb4(sw))
2449 usb4_switch_unconfigure_link(sw);
2450 else
2451 tb_lc_unconfigure_link(sw);
2452
2453 tb_switch_nvm_remove(sw);
2454
2455 if (tb_route(sw))
2456 dev_info(&sw->dev, "device disconnected\n");
2457 device_unregister(&sw->dev);
2458}
2459
/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Switch to mark as unplugged
 */
2463void tb_sw_set_unplugged(struct tb_switch *sw)
2464{
2465 struct tb_port *port;
2466
2467 if (sw == sw->tb->root_switch) {
2468 tb_sw_WARN(sw, "cannot unplug root switch\n");
2469 return;
2470 }
2471 if (sw->is_unplugged) {
2472 tb_sw_WARN(sw, "is_unplugged already set\n");
2473 return;
2474 }
2475 sw->is_unplugged = true;
2476 tb_switch_for_each_port(sw, port) {
2477 if (tb_port_has_remote(port))
2478 tb_sw_set_unplugged(port->remote->sw);
2479 else if (port->xdomain)
2480 port->xdomain->is_unplugged = true;
2481 }
2482}

/**
 * tb_switch_resume() - Resume a switch after sleep
 * @sw: Switch to resume
 *
 * For device routers, verifies that the router is still reachable and
 * that its UID has not changed while the domain was suspended. The
 * switch is then re-configured and all surviving downstream switches
 * are resumed recursively; anything that did not survive is marked
 * unplugged. Returns %0 on success and negative errno otherwise.
 */
int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check the UID of the connected switches, except for the root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port) && !port->xdomain)
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else if (tb_port_has_remote(port) || port->xdomain) {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}
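
/*
 * Illustrative sketch, not part of the driver: a connection manager
 * resumes the whole topology starting from the root switch; a non-zero
 * return means the tree changed and needs to be re-enumerated.
 * "remove_and_rescan_topology" is a placeholder helper:
 *
 *	if (tb_switch_resume(tb->root_switch))
 *		remove_and_rescan_topology(tb);
 */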

/**
 * tb_switch_suspend() - Prepare a switch for sleep
 * @sw: Switch to suspend
 *
 * Disables plug events, suspends all downstream switches first and
 * finally programs the router itself for sleep using the USB4 router
 * operation or the link controller, whichever applies.
 */
void tb_switch_suspend(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw);
	}

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}
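
/*
 * Illustrative sketch, not part of the driver: system sleep pairs the
 * two functions above, suspending the tree from the root on the way
 * down and resuming it on the way up:
 *
 *	tb_switch_suspend(tb->root_switch);	on system suspend
 *	tb_switch_resume(tb->root_switch);	on system resume
 */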

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_alloc_dp_resource(sw, in);
	return tb_lc_dp_sink_alloc(sw, in);
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
}
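
/*
 * Illustrative sketch, not part of the driver: the three helpers above
 * form a query/allocate/release lifecycle around a DP tunnel:
 *
 *	if (tb_switch_query_dp_resource(sw, in) &&
 *	    !tb_switch_alloc_dp_resource(sw, in)) {
 *		set up the DP tunnel here, and once it is torn down:
 *		tb_switch_dealloc_dp_resource(sw, in);
 *	}
 */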

/*
 * Lookup key for finding a switch on the bus with bus_find_device().
 * Matching is done by UUID first, then by route string and finally by
 * link and depth.
 */
struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in the link
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * The returned switch has its reference count increased so the caller
 * needs to call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
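
/*
 * Illustrative sketch, not part of the driver: all three lookup
 * functions above return a referenced switch, so every successful call
 * must be balanced with tb_switch_put():
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		use the switch here, then:
 *		tb_switch_put(sw);
 *	}
 */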

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
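
/*
 * Illustrative sketch, not part of the driver: tb_switch_find_port()
 * makes it easy to locate adapters by type, for example the first DP
 * IN adapter of a router (port types such as TB_TYPE_DP_HDMI_IN come
 * from the driver headers):
 *
 *	struct tb_port *in;
 *
 *	in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
 *	if (in)
 *		tb_port_dbg(in, "found DP IN adapter\n");
 */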