// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access
	 * may, in rare cases, result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; i++) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}


#ifdef CONFIG_USB_PCI
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	/*
	 * TODO: check with MSI SoC for sysdev
	 */
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   based on the number of interrupters in the xHCI HCSPARAMS1
	 *   register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
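	/*
	 * Editor's worked example (illustrative numbers, not read from any
	 * real controller): on a 4-core system whose HCSPARAMS1 advertises
	 * 8 interrupters, msix_count = min(4 + 1, 8) = 5 vectors.
	 */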

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		return ret;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
				"xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	pci_free_irq_vectors(pdev);
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	/* return if using legacy interrupt */
	if (hcd->irq > 0)
		return;

	if (hcd->msix_enabled) {
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	} else {
		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
	}

	pci_free_irq_vectors(pdev);
	hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	if (hcd->msix_enabled) {
		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(pci_irq_vector(pdev, i));
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts. Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret) {
		hcd->msi_enabled = 1;
		return 0;
	}

	if (!pdev->irq) {
		xhci_err(xhci, "No MSI-X/MSI found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
			 pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers a port by issuing a Warm
 * reset if Compliance mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xhci spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports) - 1));
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initialize compliance mode recovery data if needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);
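	/*
	 * Editor's note: imod_interval is in nanoseconds and the IMODI
	 * register field counts 250 ns increments, hence the divide by 250.
	 * For example, with the (assumed) default imod_interval of 40000 ns,
	 * the field is programmed to 40000 / 250 = 160 increments.
	 */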

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");

	xhci_dbc_init(xhci);

	xhci_debugfs_init(xhci);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
static void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		/* usb core will free this hcd shortly, unset pointer */
		xhci->shared_hcd = NULL;
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_dbc_exit(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
static void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
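	/*
	 * Editor's worked example: each segment reserves its final TRB slot
	 * for a link TRB, and one more slot is kept empty to tell a full
	 * ring from an empty one. Assuming the usual single command ring
	 * segment of TRBS_PER_SEGMENT == 64 entries, that gives
	 * 1 * (64 - 1) - 1 = 62 free TRBs.
	 */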
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
{
	int port_index;
	__le32 __iomem **port_array;
	unsigned long flags;
	u32 t1, t2;

	spin_lock_irqsave(&xhci->lock, flags);

	/* disable USB3 ports' wake bits */
	port_index = xhci->num_usb3_ports;
	port_array = xhci->usb3_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	/* disable USB2 ports' wake bits */
	port_index = xhci->num_usb2_ports;
	port_array = xhci->usb2_ports;
	while (port_index--) {
		t1 = readl(port_array[port_index]);
		t1 = xhci_port_state_to_neutral(t1);
		t2 = t1 & ~PORT_WAKE_BITS;
		if (t1 != t2)
			writel(t2, port_array[port_index]);
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	xhci_dbc_suspend(xhci);

	/* Clear root port wake on bits if wakeup not allowed. */
	if (!do_wakeup)
		xhci_disable_port_wake_on_bits(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(&xhci->op_regs->status,
			STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * Start the xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(&xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

 done:
	if (retval == 0) {
		/* Resume root hubs only when there are pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}

	/*
	 * If the system is subject to the Quirk, the Compliance Mode Timer
	 * needs to be re-initialized after every system resume, since the
	 * ports are subject to suffer the Compliance Mode issue again. It
	 * doesn't matter whether the ports previously entered U0 before the
	 * system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

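/*
 * Editor's worked examples of the index math above (the values follow
 * from the formula; they are not taken from a real device):
 *   ep 0x81 (ep 1 IN):   (1 * 2) + 1 - 1 = 2
 *   ep 0x02 (ep 2 OUT):  (2 * 2) + 0 - 1 = 3
 *   ep 0x00 (control 0): (0 * 2)         = 0
 */
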
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

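/*
 * Editor's round-trip example (illustrative only): ep_index 2 gives
 * number = DIV_ROUND_UP(2, 2) = 1 and direction = USB_DIR_IN (2 % 2 == 0),
 * i.e. endpoint address 0x81 -- the inverse of the worked example above.
 */
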
/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

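/*
 * Editor's example (values follow from the bit layout above): ep 0x81 has
 * endpoint index 2, so its context flag is 1 << (2 + 1) = 0x8 -- the same
 * 0b1000 mask used in the xhci_last_valid_endpoint() example below.
 */
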
/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
					func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		return -ESHUTDOWN;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			   num_tds * sizeof(struct xhci_td), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
	}

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}
	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
				*ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are three cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done;
		     i < urb_priv->num_tds;
		     i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	i = urb_priv->num_tds_done;
	if (i < urb_priv->num_tds)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i].start_seg,
					urb_priv->td[i].first_trb));

	for (; i < urb_priv->num_tds; i++) {
		td = &urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_STOP_CMD_PENDING;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;

err_giveback:
	if (urb_priv)
		xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
					__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	if (xhci->quirks & XHCI_MTK_HOST)
		xhci_mtk_drop_ep_quirk(hcd, udev, ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx;
	unsigned int ep_index;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	u32 new_add_flags, new_drop_flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
	    !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so the documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	if (xhci->quirks & XHCI_MTK_HOST) {
		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
		if (ret < 0) {
			xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
			virt_dev->eps[ep_index].new_ring = NULL;
			return ret;
		}
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; i++) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
1769
1770static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1771 struct usb_device *udev, u32 *cmd_status)
1772{
1773 int ret;
1774
1775 switch (*cmd_status) {
1776 case COMP_COMMAND_ABORTED:
1777 case COMP_COMMAND_RING_STOPPED:
1778 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1779 ret = -ETIME;
1780 break;
1781 case COMP_RESOURCE_ERROR:
1782 dev_warn(&udev->dev,
1783 "Not enough host controller resources for new device state.\n");
1784 ret = -ENOMEM;
1785 /* FIXME: can we allocate more resources for the HC? */
1786 break;
1787 case COMP_BANDWIDTH_ERROR:
1788 case COMP_SECONDARY_BANDWIDTH_ERROR:
1789 dev_warn(&udev->dev,
1790 "Not enough bandwidth for new device state.\n");
1791 ret = -ENOSPC;
1792 /* FIXME: can we go back to the old state? */
1793 break;
1794 case COMP_TRB_ERROR:
1795 /* the HCD set up something wrong */
1796 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1797 "add flag = 1, "
1798 "and endpoint is not disabled.\n");
1799 ret = -EINVAL;
1800 break;
1801 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1802 dev_warn(&udev->dev,
1803 "ERROR: Incompatible device for endpoint configure command.\n");
1804 ret = -ENODEV;
1805 break;
1806 case COMP_SUCCESS:
1807 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1808 "Successful Endpoint Configure command");
1809 ret = 0;
1810 break;
1811 default:
1812 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1813 *cmd_status);
1814 ret = -EINVAL;
1815 break;
1816 }
1817 return ret;
1818}
1819
1820static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1821 struct usb_device *udev, u32 *cmd_status)
1822{
1823 int ret;
1824
1825 switch (*cmd_status) {
1826 case COMP_COMMAND_ABORTED:
1827 case COMP_COMMAND_RING_STOPPED:
1828 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
1829 ret = -ETIME;
1830 break;
1831 case COMP_PARAMETER_ERROR:
1832 dev_warn(&udev->dev,
1833			"WARN: xHCI driver set up invalid evaluate context command.\n");
1834 ret = -EINVAL;
1835 break;
1836 case COMP_SLOT_NOT_ENABLED_ERROR:
1837 dev_warn(&udev->dev,
1838 "WARN: slot not enabled for evaluate context command.\n");
1839 ret = -EINVAL;
1840 break;
1841 case COMP_CONTEXT_STATE_ERROR:
1842 dev_warn(&udev->dev,
1843 "WARN: invalid context state for evaluate context command.\n");
1844 ret = -EINVAL;
1845 break;
1846 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1847 dev_warn(&udev->dev,
1848 "ERROR: Incompatible device for evaluate context command.\n");
1849 ret = -ENODEV;
1850 break;
1851 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
1852 /* Max Exit Latency too large error */
1853 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1854 ret = -EINVAL;
1855 break;
1856 case COMP_SUCCESS:
1857 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1858 "Successful evaluate context command");
1859 ret = 0;
1860 break;
1861 default:
1862 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1863 *cmd_status);
1864 ret = -EINVAL;
1865 break;
1866 }
1867 return ret;
1868}
1869
1870static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1871 struct xhci_input_control_ctx *ctrl_ctx)
1872{
1873 u32 valid_add_flags;
1874 u32 valid_drop_flags;
1875
1876 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1877 * (bit 1). The default control endpoint is added during the Address
1878 * Device command and is never removed until the slot is disabled.
1879 */
1880 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1881 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1882
1883 /* Use hweight32 to count the number of ones in the add flags, or
1884 * number of endpoints added. Don't count endpoints that are changed
1885 * (both added and dropped).
1886 */
1887 return hweight32(valid_add_flags) -
1888 hweight32(valid_add_flags & valid_drop_flags);
1889}
1890
1891static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1892 struct xhci_input_control_ctx *ctrl_ctx)
1893{
1894 u32 valid_add_flags;
1895 u32 valid_drop_flags;
1896
1897 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1898 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1899
1900 return hweight32(valid_drop_flags) -
1901 hweight32(valid_add_flags & valid_drop_flags);
1902}
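
/*
 * Example for the two counters above (flag values assumed): if add_flags
 * selects DCI 2 and DCI 3 while drop_flags selects only DCI 3, then after
 * the shift valid_add_flags = 0x3 and valid_drop_flags = 0x2. The endpoint
 * at DCI 3 is merely changed (both added and dropped), so one endpoint
 * counts as new (2 added minus 1 changed) and none counts as dropped
 * (1 dropped minus 1 changed).
 */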
1903
1904/*
1905 * We need to reserve the new number of endpoints before the configure endpoint
1906 * command completes. We can't subtract the dropped endpoints from the number
1907 * of active endpoints until the command completes because we can oversubscribe
1908 * the host in this case:
1909 *
1910 * - the first configure endpoint command drops more endpoints than it adds
1911 * - a second configure endpoint command that adds more endpoints is queued
1912 * - the first configure endpoint command fails, so the config is unchanged
1913	 * - the second command may succeed, even though there aren't enough resources
1914 *
1915 * Must be called with xhci->lock held.
1916 */
1917static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1918 struct xhci_input_control_ctx *ctrl_ctx)
1919{
1920 u32 added_eps;
1921
1922 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1923 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1924 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1925 "Not enough ep ctxs: "
1926 "%u active, need to add %u, limit is %u.",
1927 xhci->num_active_eps, added_eps,
1928 xhci->limit_active_eps);
1929 return -ENOMEM;
1930 }
1931 xhci->num_active_eps += added_eps;
1932 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1933 "Adding %u ep ctxs, %u now active.", added_eps,
1934 xhci->num_active_eps);
1935 return 0;
1936}
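
/*
 * Example of the reservation check above (limits assumed): with
 * limit_active_eps = 64 and num_active_eps = 62, a command adding 3 new
 * endpoints fails here (62 + 3 > 64) with -ENOMEM before it is ever
 * queued, while a command adding 2 succeeds and bumps the count to 64.
 */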
1937
1938/*
1939	 * The configure endpoint command was rejected by the xHC for some other reason,
1940	 * so we need to revert the resources that the failed configuration would have used.
1941 *
1942 * Must be called with xhci->lock held.
1943 */
1944static void xhci_free_host_resources(struct xhci_hcd *xhci,
1945 struct xhci_input_control_ctx *ctrl_ctx)
1946{
1947 u32 num_failed_eps;
1948
1949 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1950 xhci->num_active_eps -= num_failed_eps;
1951 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1952 "Removing %u failed ep ctxs, %u now active.",
1953 num_failed_eps,
1954 xhci->num_active_eps);
1955}
1956
1957/*
1958 * Now that the command has completed, clean up the active endpoint count by
1959 * subtracting out the endpoints that were dropped (but not changed).
1960 *
1961 * Must be called with xhci->lock held.
1962 */
1963static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1964 struct xhci_input_control_ctx *ctrl_ctx)
1965{
1966 u32 num_dropped_eps;
1967
1968 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
1969 xhci->num_active_eps -= num_dropped_eps;
1970 if (num_dropped_eps)
1971 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1972 "Removing %u dropped ep ctxs, %u now active.",
1973 num_dropped_eps,
1974 xhci->num_active_eps);
1975}
1976
1977static unsigned int xhci_get_block_size(struct usb_device *udev)
1978{
1979 switch (udev->speed) {
1980 case USB_SPEED_LOW:
1981 case USB_SPEED_FULL:
1982 return FS_BLOCK;
1983 case USB_SPEED_HIGH:
1984 return HS_BLOCK;
1985 case USB_SPEED_SUPER:
1986 case USB_SPEED_SUPER_PLUS:
1987 return SS_BLOCK;
1988 case USB_SPEED_UNKNOWN:
1989 case USB_SPEED_WIRELESS:
1990 default:
1991 /* Should never happen */
1992 return 1;
1993 }
1994}
1995
1996static unsigned int
1997xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1998{
1999 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2000 return LS_OVERHEAD;
2001 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2002 return FS_OVERHEAD;
2003 return HS_OVERHEAD;
2004}
2005
2006/* If we are changing a LS/FS device under a HS hub,
2007 * make sure (if we are activating a new TT) that the HS bus has enough
2008 * bandwidth for this new TT.
2009 */
2010static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2011 struct xhci_virt_device *virt_dev,
2012 int old_active_eps)
2013{
2014 struct xhci_interval_bw_table *bw_table;
2015 struct xhci_tt_bw_info *tt_info;
2016
2017 /* Find the bandwidth table for the root port this TT is attached to. */
2018 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2019 tt_info = virt_dev->tt_info;
2020 /* If this TT already had active endpoints, the bandwidth for this TT
2021 * has already been added. Removing all periodic endpoints (and thus
2022	 * making the TT inactive) will only decrease the bandwidth used.
2023 */
2024 if (old_active_eps)
2025 return 0;
2026	if (tt_info->active_eps != 0) {
2027 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2028 return -ENOMEM;
2029 return 0;
2030 }
2031 /* Not sure why we would have no new active endpoints...
2032 *
2033 * Maybe because of an Evaluate Context change for a hub update or a
2034 * control endpoint 0 max packet size change?
2035 * FIXME: skip the bandwidth calculation in that case.
2036 */
2037 return 0;
2038}
2039
2040static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2041 struct xhci_virt_device *virt_dev)
2042{
2043 unsigned int bw_reserved;
2044
2045	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_IN, 100);
2046 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2047 return -ENOMEM;
2048
2049	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED * SS_BW_LIMIT_OUT, 100);
2050 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2051 return -ENOMEM;
2052
2053 return 0;
2054}
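
/*
 * Sketch of the arithmetic above with assumed constants: if SS_BW_RESERVED
 * were 10 (percent) and SS_BW_LIMIT_IN were 25000 blocks, then
 * bw_reserved = DIV_ROUND_UP(10 * 25000, 100) = 2500, so periodic IN
 * endpoints may consume at most 22500 blocks before this check fails
 * with -ENOMEM.
 */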
2055
2056/*
2057 * This algorithm is a very conservative estimate of the worst-case scheduling
2058 * scenario for any one interval. The hardware dynamically schedules the
2059 * packets, so we can't tell which microframe could be the limiting factor in
2060 * the bandwidth scheduling. This only takes into account periodic endpoints.
2061 *
2062	 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2063 * case scenario. Instead, we come up with an estimate that is no less than
2064 * the worst case bandwidth used for any one microframe, but may be an
2065 * over-estimate.
2066 *
2067 * We walk the requirements for each endpoint by interval, starting with the
2068 * smallest interval, and place packets in the schedule where there is only one
2069 * possible way to schedule packets for that interval. In order to simplify
2070 * this algorithm, we record the largest max packet size for each interval, and
2071 * assume all packets will be that size.
2072 *
2073	 * For interval 0, we must schedule all packets in every microframe.
2074 * The bandwidth for interval 0 is just the amount of data to be transmitted
2075 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2076 * the number of packets).
2077 *
2078 * For interval 1, we have two possible microframes to schedule those packets
2079 * in. For this algorithm, if we can schedule the same number of packets for
2080 * each possible scheduling opportunity (each microframe), we will do so. The
2081 * remaining number of packets will be saved to be transmitted in the gaps in
2082 * the next interval's scheduling sequence.
2083 *
2084 * As we move those remaining packets to be scheduled with interval 2 packets,
2085 * we have to double the number of remaining packets to transmit. This is
2086 * because the intervals are actually powers of 2, and we would be transmitting
2087 * the previous interval's packets twice in this interval. We also have to be
2088 * sure that when we look at the largest max packet size for this interval, we
2089 * also look at the largest max packet size for the remaining packets and take
2090 * the greater of the two.
2091 *
2092 * The algorithm continues to evenly distribute packets in each scheduling
2093 * opportunity, and push the remaining packets out, until we get to the last
2094 * interval. Then those packets and their associated overhead are just added
2095 * to the bandwidth used.
2096 */
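/*
 * Worked example of the carrying scheme (packet counts assumed): 3 packets
 * at interval 1 and 2 packets at interval 2. At i = 1 there are
 * 1 << 2 = 4 scheduling opportunities, so 3 >> 2 = 0 packets can be placed
 * evenly and all 3 carry over. At i = 2 the carried packets count double
 * (6), plus the 2 new packets makes 8 across 1 << 3 = 8 opportunities, so
 * one packet's worth of bandwidth (overhead plus the largest max packet
 * size seen so far) is charged per microframe and no packets remain.
 */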
2097static int xhci_check_bw_table(struct xhci_hcd *xhci,
2098 struct xhci_virt_device *virt_dev,
2099 int old_active_eps)
2100{
2101 unsigned int bw_reserved;
2102 unsigned int max_bandwidth;
2103 unsigned int bw_used;
2104 unsigned int block_size;
2105 struct xhci_interval_bw_table *bw_table;
2106 unsigned int packet_size = 0;
2107 unsigned int overhead = 0;
2108 unsigned int packets_transmitted = 0;
2109 unsigned int packets_remaining = 0;
2110 unsigned int i;
2111
2112 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2113 return xhci_check_ss_bw(xhci, virt_dev);
2114
2115 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2116 max_bandwidth = HS_BW_LIMIT;
2117 /* Convert percent of bus BW reserved to blocks reserved */
2118 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2119 } else {
2120 max_bandwidth = FS_BW_LIMIT;
2121 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2122 }
2123
2124 bw_table = virt_dev->bw_table;
2125 /* We need to translate the max packet size and max ESIT payloads into
2126 * the units the hardware uses.
2127 */
2128 block_size = xhci_get_block_size(virt_dev->udev);
2129
2130 /* If we are manipulating a LS/FS device under a HS hub, double check
2131	 * that the HS bus has enough bandwidth if we are activating a new TT.
2132 */
2133 if (virt_dev->tt_info) {
2134 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2135 "Recalculating BW for rootport %u",
2136 virt_dev->real_port);
2137 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2138 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2139 "newly activated TT.\n");
2140 return -ENOMEM;
2141 }
2142 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2143 "Recalculating BW for TT slot %u port %u",
2144 virt_dev->tt_info->slot_id,
2145 virt_dev->tt_info->ttport);
2146 } else {
2147 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2148 "Recalculating BW for rootport %u",
2149 virt_dev->real_port);
2150 }
2151
2152 /* Add in how much bandwidth will be used for interval zero, or the
2153 * rounded max ESIT payload + number of packets * largest overhead.
2154 */
2155 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2156 bw_table->interval_bw[0].num_packets *
2157 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2158
2159 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2160 unsigned int bw_added;
2161 unsigned int largest_mps;
2162 unsigned int interval_overhead;
2163
2164 /*
2165 * How many packets could we transmit in this interval?
2166 * If packets didn't fit in the previous interval, we will need
2167 * to transmit that many packets twice within this interval.
2168 */
2169 packets_remaining = 2 * packets_remaining +
2170 bw_table->interval_bw[i].num_packets;
2171
2172 /* Find the largest max packet size of this or the previous
2173 * interval.
2174 */
2175 if (list_empty(&bw_table->interval_bw[i].endpoints))
2176 largest_mps = 0;
2177 else {
2178 struct xhci_virt_ep *virt_ep;
2179 struct list_head *ep_entry;
2180
2181 ep_entry = bw_table->interval_bw[i].endpoints.next;
2182 virt_ep = list_entry(ep_entry,
2183 struct xhci_virt_ep, bw_endpoint_list);
2184 /* Convert to blocks, rounding up */
2185 largest_mps = DIV_ROUND_UP(
2186 virt_ep->bw_info.max_packet_size,
2187 block_size);
2188 }
2189 if (largest_mps > packet_size)
2190 packet_size = largest_mps;
2191
2192 /* Use the larger overhead of this or the previous interval. */
2193 interval_overhead = xhci_get_largest_overhead(
2194 &bw_table->interval_bw[i]);
2195 if (interval_overhead > overhead)
2196 overhead = interval_overhead;
2197
2198 /* How many packets can we evenly distribute across
2199 * (1 << (i + 1)) possible scheduling opportunities?
2200 */
2201 packets_transmitted = packets_remaining >> (i + 1);
2202
2203 /* Add in the bandwidth used for those scheduled packets */
2204 bw_added = packets_transmitted * (overhead + packet_size);
2205
2206 /* How many packets do we have remaining to transmit? */
2207 packets_remaining = packets_remaining % (1 << (i + 1));
2208
2209 /* What largest max packet size should those packets have? */
2210 /* If we've transmitted all packets, don't carry over the
2211 * largest packet size.
2212 */
2213 if (packets_remaining == 0) {
2214 packet_size = 0;
2215 overhead = 0;
2216 } else if (packets_transmitted > 0) {
2217 /* Otherwise if we do have remaining packets, and we've
2218 * scheduled some packets in this interval, take the
2219 * largest max packet size from endpoints with this
2220 * interval.
2221 */
2222 packet_size = largest_mps;
2223 overhead = interval_overhead;
2224 }
2225 /* Otherwise carry over packet_size and overhead from the last
2226 * time we had a remainder.
2227 */
2228 bw_used += bw_added;
2229 if (bw_used > max_bandwidth) {
2230 xhci_warn(xhci, "Not enough bandwidth. "
2231 "Proposed: %u, Max: %u\n",
2232 bw_used, max_bandwidth);
2233 return -ENOMEM;
2234 }
2235 }
2236	/*
2237	 * Any packets left over after even-handedly scheduling interval 15
2238	 * could land in any microframe, so we over-schedule and assume they
2239	 * will be sent every microframe.
2240	 */
2242 if (packets_remaining > 0)
2243 bw_used += overhead + packet_size;
2244
2245 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2246 unsigned int port_index = virt_dev->real_port - 1;
2247
2248 /* OK, we're manipulating a HS device attached to a
2249 * root port bandwidth domain. Include the number of active TTs
2250 * in the bandwidth used.
2251 */
2252 bw_used += TT_HS_OVERHEAD *
2253 xhci->rh_bw[port_index].num_active_tts;
2254 }
2255
2256 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2257 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2258			"Available: %u percent",
2259 bw_used, max_bandwidth, bw_reserved,
2260 (max_bandwidth - bw_used - bw_reserved) * 100 /
2261 max_bandwidth);
2262
2263 bw_used += bw_reserved;
2264 if (bw_used > max_bandwidth) {
2265 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2266 bw_used, max_bandwidth);
2267 return -ENOMEM;
2268 }
2269
2270 bw_table->bw_used = bw_used;
2271 return 0;
2272}
2273
2274static bool xhci_is_async_ep(unsigned int ep_type)
2275{
2276 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2277 ep_type != ISOC_IN_EP &&
2278 ep_type != INT_IN_EP);
2279}
2280
2281static bool xhci_is_sync_in_ep(unsigned int ep_type)
2282{
2283 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2284}
2285
2286static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2287{
2288 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2289
2290 if (ep_bw->ep_interval == 0)
2291 return SS_OVERHEAD_BURST +
2292 (ep_bw->mult * ep_bw->num_packets *
2293 (SS_OVERHEAD + mps));
2294 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2295 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2296 1 << ep_bw->ep_interval);
2298}
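
/*
 * Symbolic example for the helper above: an isoc IN endpoint with
 * mult = 1, num_packets = 2, ep_interval = 1 and a max packet size
 * rounding up to M blocks consumes
 * DIV_ROUND_UP(1 * 2 * (SS_OVERHEAD + M + SS_OVERHEAD_BURST), 2) blocks
 * of ss_bw_in; only ep_interval == 0 uses the form that charges
 * SS_OVERHEAD_BURST once rather than per packet.
 */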
2299
2300static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2301 struct xhci_bw_info *ep_bw,
2302 struct xhci_interval_bw_table *bw_table,
2303 struct usb_device *udev,
2304 struct xhci_virt_ep *virt_ep,
2305 struct xhci_tt_bw_info *tt_info)
2306{
2307 struct xhci_interval_bw *interval_bw;
2308 int normalized_interval;
2309
2310 if (xhci_is_async_ep(ep_bw->type))
2311 return;
2312
2313 if (udev->speed >= USB_SPEED_SUPER) {
2314 if (xhci_is_sync_in_ep(ep_bw->type))
2315 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2316 xhci_get_ss_bw_consumed(ep_bw);
2317 else
2318 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2319 xhci_get_ss_bw_consumed(ep_bw);
2320 return;
2321 }
2322
2323 /* SuperSpeed endpoints never get added to intervals in the table, so
2324 * this check is only valid for HS/FS/LS devices.
2325 */
2326 if (list_empty(&virt_ep->bw_endpoint_list))
2327 return;
2328 /* For LS/FS devices, we need to translate the interval expressed in
2329 * microframes to frames.
2330 */
2331 if (udev->speed == USB_SPEED_HIGH)
2332 normalized_interval = ep_bw->ep_interval;
2333 else
2334 normalized_interval = ep_bw->ep_interval - 3;
2335
2336 if (normalized_interval == 0)
2337 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2338 interval_bw = &bw_table->interval_bw[normalized_interval];
2339 interval_bw->num_packets -= ep_bw->num_packets;
2340 switch (udev->speed) {
2341 case USB_SPEED_LOW:
2342 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2343 break;
2344 case USB_SPEED_FULL:
2345 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2346 break;
2347 case USB_SPEED_HIGH:
2348 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2349 break;
2350 case USB_SPEED_SUPER:
2351 case USB_SPEED_SUPER_PLUS:
2352 case USB_SPEED_UNKNOWN:
2353 case USB_SPEED_WIRELESS:
2354 /* Should never happen because only LS/FS/HS endpoints will get
2355 * added to the endpoint list.
2356 */
2357 return;
2358 }
2359 if (tt_info)
2360 tt_info->active_eps -= 1;
2361 list_del_init(&virt_ep->bw_endpoint_list);
2362}
2363
2364static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2365 struct xhci_bw_info *ep_bw,
2366 struct xhci_interval_bw_table *bw_table,
2367 struct usb_device *udev,
2368 struct xhci_virt_ep *virt_ep,
2369 struct xhci_tt_bw_info *tt_info)
2370{
2371 struct xhci_interval_bw *interval_bw;
2372 struct xhci_virt_ep *smaller_ep;
2373 int normalized_interval;
2374
2375 if (xhci_is_async_ep(ep_bw->type))
2376 return;
2377
2378	if (udev->speed >= USB_SPEED_SUPER) {
2379 if (xhci_is_sync_in_ep(ep_bw->type))
2380 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2381 xhci_get_ss_bw_consumed(ep_bw);
2382 else
2383 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2384 xhci_get_ss_bw_consumed(ep_bw);
2385 return;
2386 }
2387
2388 /* For LS/FS devices, we need to translate the interval expressed in
2389 * microframes to frames.
2390 */
2391 if (udev->speed == USB_SPEED_HIGH)
2392 normalized_interval = ep_bw->ep_interval;
2393 else
2394 normalized_interval = ep_bw->ep_interval - 3;
2395
2396 if (normalized_interval == 0)
2397 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2398 interval_bw = &bw_table->interval_bw[normalized_interval];
2399 interval_bw->num_packets += ep_bw->num_packets;
2400 switch (udev->speed) {
2401 case USB_SPEED_LOW:
2402 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2403 break;
2404 case USB_SPEED_FULL:
2405 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2406 break;
2407 case USB_SPEED_HIGH:
2408 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2409 break;
2410 case USB_SPEED_SUPER:
2411 case USB_SPEED_SUPER_PLUS:
2412 case USB_SPEED_UNKNOWN:
2413 case USB_SPEED_WIRELESS:
2414 /* Should never happen because only LS/FS/HS endpoints will get
2415 * added to the endpoint list.
2416 */
2417 return;
2418 }
2419
2420 if (tt_info)
2421 tt_info->active_eps += 1;
2422 /* Insert the endpoint into the list, largest max packet size first. */
2423 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2424 bw_endpoint_list) {
2425 if (ep_bw->max_packet_size >=
2426 smaller_ep->bw_info.max_packet_size) {
2427 /* Add the new ep before the smaller endpoint */
2428 list_add_tail(&virt_ep->bw_endpoint_list,
2429 &smaller_ep->bw_endpoint_list);
2430 return;
2431 }
2432 }
2433 /* Add the new endpoint at the end of the list. */
2434 list_add_tail(&virt_ep->bw_endpoint_list,
2435 &interval_bw->endpoints);
2436}
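
/*
 * Note on the interval normalization above (example values assumed):
 * ep_interval is stored as a microframe exponent, so a full-speed endpoint
 * with ep_interval = 3 (2^3 microframes = 1 frame) lands in
 * interval_bw[0], while a high-speed endpoint with ep_interval = 3 keeps
 * its microframe-based index 3.
 */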
2437
2438void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2439 struct xhci_virt_device *virt_dev,
2440 int old_active_eps)
2441{
2442 struct xhci_root_port_bw_info *rh_bw_info;
2443 if (!virt_dev->tt_info)
2444 return;
2445
2446 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2447 if (old_active_eps == 0 &&
2448 virt_dev->tt_info->active_eps != 0) {
2449 rh_bw_info->num_active_tts += 1;
2450 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2451 } else if (old_active_eps != 0 &&
2452 virt_dev->tt_info->active_eps == 0) {
2453 rh_bw_info->num_active_tts -= 1;
2454 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2455 }
2456}
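
/*
 * For example: adding the first periodic endpoint behind a TT takes
 * old_active_eps == 0 to active_eps != 0, so the TT is charged once
 * (num_active_tts += 1 and TT_HS_OVERHEAD added to bw_used); dropping the
 * last periodic endpoint reverses both.
 */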
2457
2458static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2459 struct xhci_virt_device *virt_dev,
2460 struct xhci_container_ctx *in_ctx)
2461{
2462 struct xhci_bw_info ep_bw_info[31];
2463 int i;
2464 struct xhci_input_control_ctx *ctrl_ctx;
2465 int old_active_eps = 0;
2466
2467 if (virt_dev->tt_info)
2468 old_active_eps = virt_dev->tt_info->active_eps;
2469
2470 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2471 if (!ctrl_ctx) {
2472 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2473 __func__);
2474 return -ENOMEM;
2475 }
2476
2477 for (i = 0; i < 31; i++) {
2478 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2479 continue;
2480
2481 /* Make a copy of the BW info in case we need to revert this */
2482 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2483 sizeof(ep_bw_info[i]));
2484 /* Drop the endpoint from the interval table if the endpoint is
2485 * being dropped or changed.
2486 */
2487 if (EP_IS_DROPPED(ctrl_ctx, i))
2488 xhci_drop_ep_from_interval_table(xhci,
2489 &virt_dev->eps[i].bw_info,
2490 virt_dev->bw_table,
2491 virt_dev->udev,
2492 &virt_dev->eps[i],
2493 virt_dev->tt_info);
2494 }
2495 /* Overwrite the information stored in the endpoints' bw_info */
2496 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2497 for (i = 0; i < 31; i++) {
2498 /* Add any changed or added endpoints to the interval table */
2499 if (EP_IS_ADDED(ctrl_ctx, i))
2500 xhci_add_ep_to_interval_table(xhci,
2501 &virt_dev->eps[i].bw_info,
2502 virt_dev->bw_table,
2503 virt_dev->udev,
2504 &virt_dev->eps[i],
2505 virt_dev->tt_info);
2506 }
2507
2508 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2509 /* Ok, this fits in the bandwidth we have.
2510 * Update the number of active TTs.
2511 */
2512 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2513 return 0;
2514 }
2515
2516 /* We don't have enough bandwidth for this, revert the stored info. */
2517 for (i = 0; i < 31; i++) {
2518 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2519 continue;
2520
2521 /* Drop the new copies of any added or changed endpoints from
2522 * the interval table.
2523 */
2524 if (EP_IS_ADDED(ctrl_ctx, i)) {
2525 xhci_drop_ep_from_interval_table(xhci,
2526 &virt_dev->eps[i].bw_info,
2527 virt_dev->bw_table,
2528 virt_dev->udev,
2529 &virt_dev->eps[i],
2530 virt_dev->tt_info);
2531 }
2532 /* Revert the endpoint back to its old information */
2533 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2534 sizeof(ep_bw_info[i]));
2535 /* Add any changed or dropped endpoints back into the table */
2536 if (EP_IS_DROPPED(ctrl_ctx, i))
2537 xhci_add_ep_to_interval_table(xhci,
2538 &virt_dev->eps[i].bw_info,
2539 virt_dev->bw_table,
2540 virt_dev->udev,
2541 &virt_dev->eps[i],
2542 virt_dev->tt_info);
2543 }
2544 return -ENOMEM;
2545}
2546
2548/* Issue a configure endpoint command or evaluate context command
2549 * and wait for it to finish.
2550 */
2551static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2552 struct usb_device *udev,
2553 struct xhci_command *command,
2554 bool ctx_change, bool must_succeed)
2555{
2556 int ret;
2557 unsigned long flags;
2558 struct xhci_input_control_ctx *ctrl_ctx;
2559 struct xhci_virt_device *virt_dev;
2560 struct xhci_slot_ctx *slot_ctx;
2561
2562 if (!command)
2563 return -EINVAL;
2564
2565 spin_lock_irqsave(&xhci->lock, flags);
2566
2567 if (xhci->xhc_state & XHCI_STATE_DYING) {
2568 spin_unlock_irqrestore(&xhci->lock, flags);
2569 return -ESHUTDOWN;
2570 }
2571
2572 virt_dev = xhci->devs[udev->slot_id];
2573
2574 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2575 if (!ctrl_ctx) {
2576 spin_unlock_irqrestore(&xhci->lock, flags);
2577 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2578 __func__);
2579 return -ENOMEM;
2580 }
2581
2582 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2583 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2584 spin_unlock_irqrestore(&xhci->lock, flags);
2585 xhci_warn(xhci, "Not enough host resources, "
2586 "active endpoint contexts = %u\n",
2587 xhci->num_active_eps);
2588 return -ENOMEM;
2589 }
2590 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2591 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2592 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2593 xhci_free_host_resources(xhci, ctrl_ctx);
2594 spin_unlock_irqrestore(&xhci->lock, flags);
2595 xhci_warn(xhci, "Not enough bandwidth\n");
2596 return -ENOMEM;
2597 }
2598
2599 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2600 trace_xhci_configure_endpoint(slot_ctx);
2601
2602 if (!ctx_change)
2603 ret = xhci_queue_configure_endpoint(xhci, command,
2604 command->in_ctx->dma,
2605 udev->slot_id, must_succeed);
2606 else
2607 ret = xhci_queue_evaluate_context(xhci, command,
2608 command->in_ctx->dma,
2609 udev->slot_id, must_succeed);
2610 if (ret < 0) {
2611 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2612 xhci_free_host_resources(xhci, ctrl_ctx);
2613 spin_unlock_irqrestore(&xhci->lock, flags);
2614 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2615 "FIXME allocate a new ring segment");
2616 return -ENOMEM;
2617 }
2618 xhci_ring_cmd_db(xhci);
2619 spin_unlock_irqrestore(&xhci->lock, flags);
2620
2621 /* Wait for the configure endpoint command to complete */
2622 wait_for_completion(command->completion);
2623
2624 if (!ctx_change)
2625 ret = xhci_configure_endpoint_result(xhci, udev,
2626 &command->status);
2627 else
2628 ret = xhci_evaluate_context_result(xhci, udev,
2629 &command->status);
2630
2631 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2632 spin_lock_irqsave(&xhci->lock, flags);
2633 /* If the command failed, remove the reserved resources.
2634 * Otherwise, clean up the estimate to include dropped eps.
2635 */
2636 if (ret)
2637 xhci_free_host_resources(xhci, ctrl_ctx);
2638 else
2639 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2640 spin_unlock_irqrestore(&xhci->lock, flags);
2641 }
2642 return ret;
2643}
2644
2645static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2646 struct xhci_virt_device *vdev, int i)
2647{
2648 struct xhci_virt_ep *ep = &vdev->eps[i];
2649
2650 if (ep->ep_state & EP_HAS_STREAMS) {
2651 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2652 xhci_get_endpoint_address(i));
2653 xhci_free_stream_info(xhci, ep->stream_info);
2654 ep->stream_info = NULL;
2655 ep->ep_state &= ~EP_HAS_STREAMS;
2656 }
2657}
2658
2659/* Called after one or more calls to xhci_add_endpoint() or
2660 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2661 * to call xhci_reset_bandwidth().
2662 *
2663 * Since we are in the middle of changing either configuration or
2664 * installing a new alt setting, the USB core won't allow URBs to be
2665 * enqueued for any endpoint on the old config or interface. Nothing
2666 * else should be touching the xhci->devs[slot_id] structure, so we
2667 * don't need to take the xhci->lock for manipulating that.
2668 */
2669static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2670{
2671 int i;
2672 int ret = 0;
2673 struct xhci_hcd *xhci;
2674 struct xhci_virt_device *virt_dev;
2675 struct xhci_input_control_ctx *ctrl_ctx;
2676 struct xhci_slot_ctx *slot_ctx;
2677 struct xhci_command *command;
2678
2679 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2680 if (ret <= 0)
2681 return ret;
2682 xhci = hcd_to_xhci(hcd);
2683 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2684 (xhci->xhc_state & XHCI_STATE_REMOVING))
2685 return -ENODEV;
2686
2687 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2688 virt_dev = xhci->devs[udev->slot_id];
2689
2690 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2691 if (!command)
2692 return -ENOMEM;
2693
2694 command->in_ctx = virt_dev->in_ctx;
2695
2696 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2697 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2698 if (!ctrl_ctx) {
2699 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2700 __func__);
2701 ret = -ENOMEM;
2702 goto command_cleanup;
2703 }
2704 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2705 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2706 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2707
2708	/* Don't issue the command if there are no endpoints to update. */
2709 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2710 ctrl_ctx->drop_flags == 0) {
2711 ret = 0;
2712 goto command_cleanup;
2713 }
2714 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2715 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2716 for (i = 31; i >= 1; i--) {
2717 __le32 le32 = cpu_to_le32(BIT(i));
2718
2719 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2720 || (ctrl_ctx->add_flags & le32) || i == 1) {
2721 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2722 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2723 break;
2724 }
2725 }
2726
2727 ret = xhci_configure_endpoint(xhci, udev, command,
2728 false, false);
2729 if (ret)
2730		/* Caller should call reset_bandwidth() */
2731 goto command_cleanup;
2732
2733 /* Free any rings that were dropped, but not changed. */
2734 for (i = 1; i < 31; i++) {
2735 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2736 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2737 xhci_free_endpoint_ring(xhci, virt_dev, i);
2738 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2739 }
2740 }
2741 xhci_zero_in_ctx(xhci, virt_dev);
2742 /*
2743 * Install any rings for completely new endpoints or changed endpoints,
2744 * and free any old rings from changed endpoints.
2745 */
2746 for (i = 1; i < 31; i++) {
2747 if (!virt_dev->eps[i].new_ring)
2748 continue;
2749 /* Only free the old ring if it exists.
2750 * It may not if this is the first add of an endpoint.
2751 */
2752 if (virt_dev->eps[i].ring) {
2753 xhci_free_endpoint_ring(xhci, virt_dev, i);
2754 }
2755 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2756 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2757 virt_dev->eps[i].new_ring = NULL;
2758 }
2759command_cleanup:
2760 kfree(command->completion);
2761 kfree(command);
2762
2763 return ret;
2764}
2765
2766static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2767{
2768 struct xhci_hcd *xhci;
2769 struct xhci_virt_device *virt_dev;
2770 int i, ret;
2771
2772 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2773 if (ret <= 0)
2774 return;
2775 xhci = hcd_to_xhci(hcd);
2776
2777 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2778 virt_dev = xhci->devs[udev->slot_id];
2779 /* Free any rings allocated for added endpoints */
2780 for (i = 0; i < 31; i++) {
2781 if (virt_dev->eps[i].new_ring) {
2782 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2783 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2784 virt_dev->eps[i].new_ring = NULL;
2785 }
2786 }
2787 xhci_zero_in_ctx(xhci, virt_dev);
2788}
2789
2790static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2791 struct xhci_container_ctx *in_ctx,
2792 struct xhci_container_ctx *out_ctx,
2793 struct xhci_input_control_ctx *ctrl_ctx,
2794 u32 add_flags, u32 drop_flags)
2795{
2796 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2797 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2798 xhci_slot_copy(xhci, in_ctx, out_ctx);
2799 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2800}
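
/*
 * Setting the same bit in both add_flags and drop_flags marks that
 * endpoint as "changed": a single configure endpoint command drops and
 * re-adds it, which also resets its data toggle and sequence number.
 * xhci_setup_input_ctx_for_quirk() and xhci_endpoint_reset() below rely
 * on this.
 */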
2801
2802static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2803 unsigned int slot_id, unsigned int ep_index,
2804 struct xhci_dequeue_state *deq_state)
2805{
2806 struct xhci_input_control_ctx *ctrl_ctx;
2807 struct xhci_container_ctx *in_ctx;
2808 struct xhci_ep_ctx *ep_ctx;
2809 u32 added_ctxs;
2810 dma_addr_t addr;
2811
2812 in_ctx = xhci->devs[slot_id]->in_ctx;
2813 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2814 if (!ctrl_ctx) {
2815 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2816 __func__);
2817 return;
2818 }
2819
2820 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2821 xhci->devs[slot_id]->out_ctx, ep_index);
2822 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2823 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2824 deq_state->new_deq_ptr);
2825 if (addr == 0) {
2826 xhci_warn(xhci, "WARN Cannot submit config ep after "
2827 "reset ep command\n");
2828 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2829 deq_state->new_deq_seg,
2830 deq_state->new_deq_ptr);
2831 return;
2832 }
2833 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2834
2835 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2836 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2837 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2838 added_ctxs, added_ctxs);
2839}
2840
2841void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
2842 unsigned int stream_id, struct xhci_td *td)
2843{
2844 struct xhci_dequeue_state deq_state;
2845 struct usb_device *udev = td->urb->dev;
2846
2847 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2848 "Cleaning up stalled endpoint ring");
2849 /* We need to move the HW's dequeue pointer past this TD,
2850 * or it will attempt to resend it on the next doorbell ring.
2851 */
2852 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2853 ep_index, stream_id, td, &deq_state);
2854
2855 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2856 return;
2857
2858 /* HW with the reset endpoint quirk will use the saved dequeue state to
2859 * issue a configure endpoint command later.
2860 */
2861 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2862 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2863 "Queueing new dequeue state");
2864 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2865 ep_index, &deq_state);
2866 } else {
2867 /* Better hope no one uses the input context between now and the
2868 * reset endpoint completion!
2869 * XXX: No idea how this hardware will react when stream rings
2870 * are enabled.
2871 */
2872 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2873 "Setting up input context for "
2874 "configure endpoint command");
2875 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2876 ep_index, &deq_state);
2877 }
2878}
2879
2880/*
2881 * Called after usb core issues a clear halt control message.
2882 * The host side of the halt should already be cleared by a reset endpoint
2883 * command issued when the STALL event was received.
2884 *
2885 * The reset endpoint command may only be issued to endpoints in the halted
2886 * state. For software that wishes to reset the data toggle or sequence number
2887 * of an endpoint that isn't in the halted state this function will issue a
2888 * configure endpoint command with the Drop and Add bits set for the target
2889	 * endpoint. Refer to the additional note in xHCI specification section 4.6.8.
2890	 */
2892static void xhci_endpoint_reset(struct usb_hcd *hcd,
2893 struct usb_host_endpoint *host_ep)
2894{
2895 struct xhci_hcd *xhci;
2896 struct usb_device *udev;
2897 struct xhci_virt_device *vdev;
2898 struct xhci_virt_ep *ep;
2899 struct xhci_input_control_ctx *ctrl_ctx;
2900 struct xhci_command *stop_cmd, *cfg_cmd;
2901 unsigned int ep_index;
2902 unsigned long flags;
2903 u32 ep_flag;
2904
2905 xhci = hcd_to_xhci(hcd);
2906 if (!host_ep->hcpriv)
2907 return;
2908 udev = (struct usb_device *) host_ep->hcpriv;
2909 vdev = xhci->devs[udev->slot_id];
2910 ep_index = xhci_get_endpoint_index(&host_ep->desc);
2911 ep = &vdev->eps[ep_index];
2912
2913	/* Bail out if toggle is already being cleared by an endpoint reset */
2914 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
2915 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
2916 return;
2917 }
2918	/* Only interrupt and bulk endpoints use data toggle; see USB 2.0 spec 5.5.4 */
2919 if (usb_endpoint_xfer_control(&host_ep->desc) ||
2920 usb_endpoint_xfer_isoc(&host_ep->desc))
2921 return;
2922
2923 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
2924
2925 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
2926 return;
2927
2928 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
2929 if (!stop_cmd)
2930 return;
2931
2932 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
2933 if (!cfg_cmd)
2934 goto cleanup;
2935
2936 spin_lock_irqsave(&xhci->lock, flags);
2937
2938 /* block queuing new trbs and ringing ep doorbell */
2939 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
2940
2941 /*
2942	 * Make sure the endpoint ring is empty before resetting the toggle/seq.
2943	 * The driver is required to synchronously cancel all transfer requests.
2944	 * Stop the endpoint to force the xHC to update the output context.
2945 */
2946
2947 if (!list_empty(&ep->ring->td_list)) {
2948 dev_err(&udev->dev, "EP not empty, refuse reset\n");
2949 spin_unlock_irqrestore(&xhci->lock, flags);
2950 goto cleanup;
2951 }
2952 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
2953 xhci_ring_cmd_db(xhci);
2954 spin_unlock_irqrestore(&xhci->lock, flags);
2955
2956 wait_for_completion(stop_cmd->completion);
2957
2958 spin_lock_irqsave(&xhci->lock, flags);
2959
2960 /* config ep command clears toggle if add and drop ep flags are set */
2961 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
2962 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
2963 ctrl_ctx, ep_flag, ep_flag);
2964 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
2965
2966 xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
2967 udev->slot_id, false);
2968 xhci_ring_cmd_db(xhci);
2969 spin_unlock_irqrestore(&xhci->lock, flags);
2970
2971 wait_for_completion(cfg_cmd->completion);
2972
2973 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
2974 xhci_free_command(xhci, cfg_cmd);
2975cleanup:
2976 xhci_free_command(xhci, stop_cmd);
2977}
2978
2979static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2980 struct usb_device *udev, struct usb_host_endpoint *ep,
2981 unsigned int slot_id)
2982{
2983 int ret;
2984 unsigned int ep_index;
2985 unsigned int ep_state;
2986
2987 if (!ep)
2988 return -EINVAL;
2989 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2990 if (ret <= 0)
2991 return -EINVAL;
2992 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
2993 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2994 " descriptor for ep 0x%x does not support streams\n",
2995 ep->desc.bEndpointAddress);
2996 return -EINVAL;
2997 }
2998
2999 ep_index = xhci_get_endpoint_index(&ep->desc);
3000 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3001 if (ep_state & EP_HAS_STREAMS ||
3002 ep_state & EP_GETTING_STREAMS) {
3003 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3004 "already has streams set up.\n",
3005 ep->desc.bEndpointAddress);
3006 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3007 "dynamic stream context array reallocation.\n");
3008 return -EINVAL;
3009 }
3010 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3011 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3012 "endpoint 0x%x; URBs are pending.\n",
3013 ep->desc.bEndpointAddress);
3014 return -EINVAL;
3015 }
3016 return 0;
3017}
3018
3019static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3020 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3021{
3022 unsigned int max_streams;
3023
3024 /* The stream context array size must be a power of two */
3025 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3026 /*
3027 * Find out how many primary stream array entries the host controller
3028 * supports. Later we may use secondary stream arrays (similar to 2nd
3029 * level page entries), but that's an optional feature for xHCI host
3030 * controllers. xHCs must support at least 4 stream IDs.
3031 */
3032 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3033 if (*num_stream_ctxs > max_streams) {
3034 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3035 max_streams);
3036 *num_stream_ctxs = max_streams;
3037 *num_streams = max_streams;
3038 }
3039}
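
/*
 * Example (requested counts assumed): a driver asking for 8 streams
 * reaches here as *num_streams = 9 (stream 0 included), which rounds up
 * to *num_stream_ctxs = 16. If HCC_MAX_PSA reports only 8 primary stream
 * array entries, both values are clamped to 8, leaving the driver 7
 * usable stream IDs.
 */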
3040
3041/* Returns an error code if one of the endpoints already has streams.
3042 * This does not change any data structures, it only checks and gathers
3043 * information.
3044 */
3045static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3046 struct usb_device *udev,
3047 struct usb_host_endpoint **eps, unsigned int num_eps,
3048 unsigned int *num_streams, u32 *changed_ep_bitmask)
3049{
3050 unsigned int max_streams;
3051 unsigned int endpoint_flag;
3052 int i;
3053 int ret;
3054
3055 for (i = 0; i < num_eps; i++) {
3056 ret = xhci_check_streams_endpoint(xhci, udev,
3057 eps[i], udev->slot_id);
3058 if (ret < 0)
3059 return ret;
3060
3061 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3062 if (max_streams < (*num_streams - 1)) {
3063 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3064 eps[i]->desc.bEndpointAddress,
3065 max_streams);
3066			*num_streams = max_streams + 1;
3067 }
3068
3069 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3070 if (*changed_ep_bitmask & endpoint_flag)
3071 return -EINVAL;
3072 *changed_ep_bitmask |= endpoint_flag;
3073 }
3074 return 0;
3075}
3076
3077static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3078 struct usb_device *udev,
3079 struct usb_host_endpoint **eps, unsigned int num_eps)
3080{
3081 u32 changed_ep_bitmask = 0;
3082 unsigned int slot_id;
3083 unsigned int ep_index;
3084 unsigned int ep_state;
3085 int i;
3086
3087 slot_id = udev->slot_id;
3088 if (!xhci->devs[slot_id])
3089 return 0;
3090
3091 for (i = 0; i < num_eps; i++) {
3092 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3093 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3094 /* Are streams already being freed for the endpoint? */
3095 if (ep_state & EP_GETTING_NO_STREAMS) {
3096 xhci_warn(xhci, "WARN Can't disable streams for "
3097 "endpoint 0x%x, "
3098 "streams are being disabled already\n",
3099 eps[i]->desc.bEndpointAddress);
3100 return 0;
3101 }
3102 /* Are there actually any streams to free? */
3103 if (!(ep_state & EP_HAS_STREAMS) &&
3104 !(ep_state & EP_GETTING_STREAMS)) {
3105 xhci_warn(xhci, "WARN Can't disable streams for "
3106 "endpoint 0x%x, "
3107 "streams are already disabled!\n",
3108 eps[i]->desc.bEndpointAddress);
3109 xhci_warn(xhci, "WARN xhci_free_streams() called "
3110 "with non-streams endpoint\n");
3111 return 0;
3112 }
3113 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3114 }
3115 return changed_ep_bitmask;
3116}
3117
3118/*
3119 * The USB device drivers use this function (through the HCD interface in USB
3120 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3121 * coordinate mass storage command queueing across multiple endpoints (basically
3122 * a stream ID == a task ID).
3123 *
3124 * Setting up streams involves allocating the same size stream context array
3125 * for each endpoint and issuing a configure endpoint command for all endpoints.
3126 *
3127 * Don't allow the call to succeed if one endpoint only supports one stream
3128 * (which means it doesn't support streams at all).
3129 *
3130 * Drivers may get less stream IDs than they asked for, if the host controller
3131 * hardware or endpoints claim they can't support the number of requested
3132 * stream IDs.
3133 */
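/*
 * Hypothetical caller's-eye sketch (not code from this file): a class
 * driver reaches this routine through the USB core, e.g. roughly:
 *
 *	ret = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (ret < 0)
 *		fall back to ordinary, non-stream transfers
 *
 * A return value of N means stream IDs 1 through N are usable.
 */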
3134static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3135 struct usb_host_endpoint **eps, unsigned int num_eps,
3136 unsigned int num_streams, gfp_t mem_flags)
3137{
3138 int i, ret;
3139 struct xhci_hcd *xhci;
3140 struct xhci_virt_device *vdev;
3141 struct xhci_command *config_cmd;
3142 struct xhci_input_control_ctx *ctrl_ctx;
3143 unsigned int ep_index;
3144 unsigned int num_stream_ctxs;
3145 unsigned int max_packet;
3146 unsigned long flags;
3147 u32 changed_ep_bitmask = 0;
3148
3149 if (!eps)
3150 return -EINVAL;
3151
3152 /* Add one to the number of streams requested to account for
3153 * stream 0 that is reserved for xHCI usage.
3154 */
3155 num_streams += 1;
3156 xhci = hcd_to_xhci(hcd);
3157 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3158 num_streams);
3159
3160 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3161 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3162 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3163 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3164 return -ENOSYS;
3165 }
3166
3167 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3168 if (!config_cmd)
3169 return -ENOMEM;
3170
3171 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3172 if (!ctrl_ctx) {
3173 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3174 __func__);
3175 xhci_free_command(xhci, config_cmd);
3176 return -ENOMEM;
3177 }
3178
3179 /* Check to make sure all endpoints are not already configured for
3180 * streams. While we're at it, find the maximum number of streams that
3181 * all the endpoints will support and check for duplicate endpoints.
3182 */
3183 spin_lock_irqsave(&xhci->lock, flags);
3184 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3185 num_eps, &num_streams, &changed_ep_bitmask);
3186 if (ret < 0) {
3187 xhci_free_command(xhci, config_cmd);
3188 spin_unlock_irqrestore(&xhci->lock, flags);
3189 return ret;
3190 }
3191 if (num_streams <= 1) {
3192 xhci_warn(xhci, "WARN: endpoints can't handle "
3193 "more than one stream.\n");
3194 xhci_free_command(xhci, config_cmd);
3195 spin_unlock_irqrestore(&xhci->lock, flags);
3196 return -EINVAL;
3197 }
3198 vdev = xhci->devs[udev->slot_id];
3199 /* Mark each endpoint as being in transition, so
3200 * xhci_urb_enqueue() will reject all URBs.
3201 */
3202 for (i = 0; i < num_eps; i++) {
3203 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3204 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3205 }
3206 spin_unlock_irqrestore(&xhci->lock, flags);
3207
3208 /* Setup internal data structures and allocate HW data structures for
3209 * streams (but don't install the HW structures in the input context
3210 * until we're sure all memory allocation succeeded).
3211 */
3212 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3213 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3214 num_stream_ctxs, num_streams);
3215
3216 for (i = 0; i < num_eps; i++) {
3217 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3218 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3219 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3220 num_stream_ctxs,
3221 num_streams,
3222 max_packet, mem_flags);
3223 if (!vdev->eps[ep_index].stream_info)
3224 goto cleanup;
3225 /* Set maxPstreams in endpoint context and update deq ptr to
3226 * point to stream context array. FIXME
3227 */
3228 }
3229
3230 /* Set up the input context for a configure endpoint command. */
3231 for (i = 0; i < num_eps; i++) {
3232 struct xhci_ep_ctx *ep_ctx;
3233
3234 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3235 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3236
3237 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3238 vdev->out_ctx, ep_index);
3239 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3240 vdev->eps[ep_index].stream_info);
3241 }
3242 /* Tell the HW to drop its old copy of the endpoint context info
3243 * and add the updated copy from the input context.
3244 */
3245 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3246 vdev->out_ctx, ctrl_ctx,
3247 changed_ep_bitmask, changed_ep_bitmask);
3248
3249 /* Issue and wait for the configure endpoint command */
3250 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3251 false, false);
3252
3253 /* xHC rejected the configure endpoint command for some reason, so we
3254 * leave the old ring intact and free our internal streams data
3255 * structure.
3256 */
3257 if (ret < 0)
3258 goto cleanup;
3259
3260 spin_lock_irqsave(&xhci->lock, flags);
3261 for (i = 0; i < num_eps; i++) {
3262 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3263 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3264 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3265 udev->slot_id, ep_index);
3266 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3267 }
3268 xhci_free_command(xhci, config_cmd);
3269 spin_unlock_irqrestore(&xhci->lock, flags);
3270
3271 /* Subtract 1 for stream 0, which drivers can't use */
3272 return num_streams - 1;
3273
3274cleanup:
3275 /* If it didn't work, free the streams! */
3276 for (i = 0; i < num_eps; i++) {
3277 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3278 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3279 vdev->eps[ep_index].stream_info = NULL;
3280 /* FIXME Unset maxPstreams in endpoint context and
3281		 * update deq ptr to point to the normal ring.
3282 */
3283 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3284 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3285 xhci_endpoint_zero(xhci, vdev, eps[i]);
3286 }
3287 xhci_free_command(xhci, config_cmd);
3288 return -ENOMEM;
3289}
3290
3291/* Transition the endpoint from using streams to being a "normal" endpoint
3292 * without streams.
3293 *
3294 * Modify the endpoint context state, submit a configure endpoint command,
3295 * and free all endpoint rings for streams if that completes successfully.
3296 */
3297static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3298 struct usb_host_endpoint **eps, unsigned int num_eps,
3299 gfp_t mem_flags)
3300{
3301 int i, ret;
3302 struct xhci_hcd *xhci;
3303 struct xhci_virt_device *vdev;
3304 struct xhci_command *command;
3305 struct xhci_input_control_ctx *ctrl_ctx;
3306 unsigned int ep_index;
3307 unsigned long flags;
3308 u32 changed_ep_bitmask;
3309
3310 xhci = hcd_to_xhci(hcd);
3311 vdev = xhci->devs[udev->slot_id];
3312
3313 /* Set up a configure endpoint command to remove the streams rings */
3314 spin_lock_irqsave(&xhci->lock, flags);
3315 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3316 udev, eps, num_eps);
3317 if (changed_ep_bitmask == 0) {
3318 spin_unlock_irqrestore(&xhci->lock, flags);
3319 return -EINVAL;
3320 }
3321
3322 /* Use the xhci_command structure from the first endpoint. We may have
3323 * allocated too many, but the driver may call xhci_free_streams() for
3324 * each endpoint it grouped into one call to xhci_alloc_streams().
3325 */
3326 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3327 command = vdev->eps[ep_index].stream_info->free_streams_command;
3328 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3329 if (!ctrl_ctx) {
3330 spin_unlock_irqrestore(&xhci->lock, flags);
3331 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3332 __func__);
3333 return -EINVAL;
3334 }
3335
3336 for (i = 0; i < num_eps; i++) {
3337 struct xhci_ep_ctx *ep_ctx;
3338
3339 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3340 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3341 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3342 EP_GETTING_NO_STREAMS;
3343
3344 xhci_endpoint_copy(xhci, command->in_ctx,
3345 vdev->out_ctx, ep_index);
3346 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3347 &vdev->eps[ep_index]);
3348 }
3349 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3350 vdev->out_ctx, ctrl_ctx,
3351 changed_ep_bitmask, changed_ep_bitmask);
3352 spin_unlock_irqrestore(&xhci->lock, flags);
3353
3354 /* Issue and wait for the configure endpoint command,
3355 * which must succeed.
3356 */
3357 ret = xhci_configure_endpoint(xhci, udev, command,
3358 false, true);
3359
3360 /* xHC rejected the configure endpoint command for some reason, so we
3361 * leave the streams rings intact.
3362 */
3363 if (ret < 0)
3364 return ret;
3365
3366 spin_lock_irqsave(&xhci->lock, flags);
3367 for (i = 0; i < num_eps; i++) {
3368 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3369 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3370 vdev->eps[ep_index].stream_info = NULL;
3371 /* FIXME Unset maxPstreams in endpoint context and
3372		 * update deq ptr to point to the normal ring.
3373 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
	struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
		struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
						SLOT_STATE_DISABLED)
		return 0;

	trace_xhci_discover_or_reset_device(slot_ctx);

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	wait_for_completion(reset_device_cmd->completion);

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout waiting for reset device command\n");
		ret = -ETIME;
		goto command_cleanup;
	case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
	case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free the rings. */
	for (i = 1; i < 31; i++) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
					xhci_get_endpoint_address(i));
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
			xhci_free_endpoint_ring(xhci, virt_dev, i);
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;
	int i, ret;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow controller to runtime suspend
	 * if no devices remain.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_put_noidle(hcd->self.controller);
#endif

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_free_dev(slot_ctx);

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; i++) {
		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}
	xhci_debugfs_remove_slot(xhci, udev->slot_id);
	virt_dev->udev = NULL;
	ret = xhci_disable_slot(xhci, udev->slot_id);
	if (ret)
		xhci_free_virt_device(xhci, udev->slot_id);
}

int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
{
	struct xhci_command *command;
	unsigned long flags;
	u32 state;
	int ret = 0;

	command = xhci_alloc_command(xhci, false, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = readl(&xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return -ENODEV;
	}

	ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding 1 ep ctx, %u now active.",
			xhci->num_active_eps);
	return 0;
}


/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret, slot_id;
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return 0;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		xhci_free_command(xhci, command);
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(command->completion);
	slot_id = command->slot_id;

	if (!slot_id || command->status != COMP_SUCCESS) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
				HCS_MAX_SLOTS(
					readl(&xhci->cap_regs->hcs_params1)));
		xhci_free_command(xhci, command);
		return 0;
	}

	xhci_free_command(xhci, command);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_alloc_dev(slot_ctx);

	udev->slot_id = slot_id;

	xhci_debugfs_create_slot(xhci, slot_id);

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_get_noresume(hcd->self.controller);
#endif

	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;

disable_slot:
	ret = xhci_disable_slot(xhci, udev->slot_id);
	if (ret)
		xhci_free_virt_device(xhci, udev->slot_id);

	return 0;
}

/*
 * Issue an Address Device command and optionally send a corresponding
 * SetAddress request to the device.
 */
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
		enum xhci_setup_dev setup)
{
	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
	unsigned long flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	struct xhci_command *command = NULL;

	mutex_lock(&xhci->mutex);

	if (xhci->xhc_state) {	/* dying, removing or halted */
		ret = -ESHUTDOWN;
		goto out;
	}

	if (!udev->slot_id) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Bad Slot ID %d", udev->slot_id);
		ret = -EINVAL;
		goto out;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
			udev->slot_id);
		ret = -EINVAL;
		goto out;
	}
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_setup_device_slot(slot_ctx);

	if (setup == SETUP_CONTEXT_ONLY) {
		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
		    SLOT_STATE_DEFAULT) {
			xhci_dbg(xhci, "Slot already in default state\n");
			goto out;
		}
	}

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command) {
		ret = -ENOMEM;
		goto out;
	}

	command->in_ctx = virt_dev->in_ctx;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		ret = -EINVAL;
		goto out;
	}
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);

	spin_lock_irqsave(&xhci->lock, flags);
	trace_xhci_setup_device(virt_dev);
	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
					udev->slot_id, setup);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"FIXME: allocate a command ring segment");
		goto out;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	wait_for_completion(command->completion);

	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	switch (command->status) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
		ret = -ETIME;
		break;
	case COMP_CONTEXT_STATE_ERROR:
	case COMP_SLOT_NOT_ENABLED_ERROR:
		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
			 act, udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);

		mutex_unlock(&xhci->mutex);
		ret = xhci_disable_slot(xhci, udev->slot_id);
		if (!ret)
			xhci_alloc_dev(hcd, udev);
		kfree(command->completion);
		kfree(command);
		return -EPROTO;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for setup %s command\n", act);
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			       "Successful setup %s command", act);
		break;
	default:
		xhci_err(xhci,
			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
			 act, command->status);
		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	}
	if (ret)
		goto out;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Op regs DCBAA ptr = %#016llx", temp_64);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		"Slot ID %d dcbaa entry @%p = %#016llx",
		udev->slot_id,
		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
		(unsigned long long)
		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Output Context DMA address = %#08llx",
			(unsigned long long)virt_dev->out_ctx->dma);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		       "Internal device address = %d",
		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
out:
	mutex_unlock(&xhci->mutex);
	if (command) {
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}

static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
}

/*
 * Translate a root hub port index into the real index in the HW port
 * status registers: calculate the offset between the port's PORTSC
 * register and the port status base, then divide by the number of
 * per-port registers to get the real index. Raw port numbers are
 * 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
	__le32 __iomem *addr;
	int raw_port;

	if (hcd->speed < HCD_USB3)
		addr = xhci->usb2_ports[port1 - 1];
	else
		addr = xhci->usb3_ports[port1 - 1];

	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
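	/*
	 * e.g. a PORTSC that sits two register sets past the port status
	 * base gives (addr - base_addr) / NUM_PORT_REGS = 2, i.e. raw
	 * (1-based) port number 3
	 */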
	return raw_port;
}

/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);

	virt_dev = xhci->devs[udev->slot_id];

	/*
	 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
	 * xHC was re-initialized. Exit latency will be set later after
	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
	 */

	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	command = xhci->lpm_command;
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
	slot_ctx->dev_state = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Set up evaluate context for LPM MEL change.");

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

#ifdef CONFIG_PM

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}
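	/*
	 * e.g. u2del = 300us: with BESL support the table scan stops at
	 * index 3 (300us); without it, besl_host = (300 - 51) / 75 + 1 = 4,
	 * the first step that covers the exit latency
	 */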

	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}

/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
	u32 field;
	int l1;
	int besld = 0;
	int hirdm = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
	l1 = udev->l1_params.timeout / 256;
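	/* e.g. a 512us L1 timeout gives l1 = 2 (two 256us steps) */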

	/* device has preferred BESLD */
	if (field & USB_BESL_DEEP_VALID) {
		besld = USB_GET_BESL_DEEP(field);
		hirdm = 1;
	}

	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem **port_array;
	__le32 __iomem *pm_addr, *hlpm_addr;
	u32 pm_val, hlpm_val, field;
	unsigned int port_num;
	unsigned long flags;
	int hird, exit_latency;
	int ret;

	if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + PORTPMSC;
	pm_val = readl(pm_addr);
	hlpm_addr = port_array[port_num] + PORTHLPMC;
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num + 1);

	if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
		/* Host supports BESL timeout instead of HIRD */
		if (udev->usb2_hw_lpm_besl_capable) {
			/* if device doesn't have a preferred BESL value use a
			 * default one which works with mixed HIRD and BESL
			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
			 */
			if ((field & USB_BESL_SUPPORT) &&
			    (field & USB_BESL_BASELINE_VALID))
				hird = USB_GET_BESL_BASELINE(field);
			else
				hird = udev->l1_params.besl;

			exit_latency = xhci_besl_encoding[hird];
			spin_unlock_irqrestore(&xhci->lock, flags);

			/* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
			 * input context for link power management evaluate
			 * context commands. It is protected by hcd->bandwidth
			 * mutex and is shared by all devices. We need to set
			 * the max ext latency in USB 2 BESL LPM as well, so
			 * use the same mutex and xhci_change_max_exit_latency()
			 */
			mutex_lock(hcd->bandwidth_mutex);
			ret = xhci_change_max_exit_latency(xhci, udev,
							   exit_latency);
			mutex_unlock(hcd->bandwidth_mutex);

			if (ret < 0)
				return ret;
			spin_lock_irqsave(&xhci->lock, flags);

			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
		} else {
			hird = xhci_calculate_hird_besl(xhci, udev);
		}

		pm_val &= ~PORT_HIRD_MASK;
		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
	} else {
		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			mutex_lock(hcd->bandwidth_mutex);
			xhci_change_max_exit_latency(xhci, udev, 0);
			mutex_unlock(hcd->bandwidth_mutex);
			return 0;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

/* Check whether a USB2 port supports a given extended capability protocol.
 * Only the extended protocol capability values of USB2 ports are cached.
 * Return 1 if the capability is supported.
 */
static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
					   unsigned capability)
{
	u32 port_offset, port_count;
	int i;

	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			/* port offsets start at 1 */
			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
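			/*
			 * e.g. a capability with offset 3 and count 2
			 * covers 0-based ports 2 and 3
			 */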
			if (port >= port_offset &&
			    port < port_offset + port_count)
				return 1;
		}
	}
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int portnum = udev->portnum - 1;

	if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return 0;

	/* so far we only support LPM for non-hub devices connected to the root hub */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return 0;

	if (xhci->hw_lpm_support == 1 &&
			xhci_check_usb2_port_capability(
				xhci, portnum, XHCI_HLC)) {
		udev->usb2_hw_lpm_capable = 1;
		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
		udev->l1_params.besl = XHCI_DEFAULT_BESL;
		if (xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_BLC))
			udev->usb2_hw_lpm_besl_capable = 1;
	}

	return 0;
}

/*---------------------- USB 3.0 Link PM functions ------------------------*/

/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
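/* e.g. bInterval = 4 gives 2^3 * 125us = 1 ms = 1000000 ns */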
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}

static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}

/* The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
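/* For example, by the rules above a bulk endpoint on a device with
 * U1 SEL = 100us gets a U1 timeout of 5 * 100us = 500us.
 */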
static unsigned long long xhci_calculate_intel_u1_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
		/* fall through */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	return timeout_ns;
}

/* Returns the hub-encoded U1 timeout value. */
static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	if (xhci->quirks & XHCI_INTEL_HOST)
		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
	else
		timeout_ns = udev->u1_params.sel;

	/* The U1 timeout is encoded in 1us intervals.
	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
	 */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns = 1;
	else
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}

/* The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals).
 *  - the U2 Exit Latency of the device
 */
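/* For example, an isoc endpoint with an 8 ms service interval on a device
 * reporting a 2 ms U2 exit latency gets max(10 ms, 8 ms, 2 ms) = 10 ms.
 */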
static unsigned long long xhci_calculate_intel_u2_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
	    (xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	return timeout_ns;
}

/* Returns the hub-encoded U2 timeout value. */
static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	if (xhci->quirks & XHCI_INTEL_HOST)
		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
	else
		timeout_ns = udev->u2_params.sel;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
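	/* e.g. 10 ms = 10000000 ns encodes as DIV_ROUND_UP(10000000, 256000) = 40 */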
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu us\n", timeout_ns * 256);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}

static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1)
		return xhci_calculate_u1_timeout(xhci, udev, desc);
	else if (state == USB3_LPM_U2)
		return xhci_calculate_u2_timeout(xhci, udev, desc);

	return USB3_LPM_DISABLED;
}

static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}

static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
		continue;
	}
	return 0;
}

static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	else
		return 0;
}

/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;
	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * Refer to section 6.2.2: MTT should be 0 for full speed hubs,
	 * but it may already be set to 1 when setting up an xHCI virtual
	 * device, so clear it anyway.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
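		/*
		 * e.g. tt->think_time = 1332 ns (16 FS bit times)
		 * maps to encoding 1332 / 666 - 1 = 1
		 */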
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
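	/* MFINDEX counts 125us microframes; shifting by 3 yields 1ms frames */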
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device *dev = hcd->self.sysdev;
	unsigned int minor_rev;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support to build packet from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/*
		 * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
		 * minor revision instead of sbrn
		 */
		minor_rev = xhci->usb3_rhub.min_rev;
		if (minor_rev) {
			hcd->speed = HCD_USB31;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		}
		xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
			  minor_rev,
			  minor_rev ? "Enhanced" : "");

		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		return 0;
	}

	mutex_init(&xhci->mutex);
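	/*
	 * Capability registers start at the register base; operational
	 * registers follow CAPLENGTH bytes later, and runtime registers
	 * sit at the offset given by RTSOFF.
	 */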
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	xhci->quirks |= quirks;

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1. However, these xHCs don't actually
	 * support 64-bit address memory pointers. So this driver clears
	 * the AC64 bit of xhci->hcc_params so that dma_set_coherent_mask(dev,
	 * DMA_BIT_MASK(32)) is used below in this xhci_gen_setup().
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
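	/* e.g. the doorbell array is 256 32-bit registers: 256 * 32 / 8 = 1024 bytes */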
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);
155
156/*
157 * Reset a halted HC.
158 *
159 * This resets pipelines, timers, counters, state machines, etc.
160 * Transactions will be terminated immediately, and operational registers
161 * will be set to their defaults.
162 */
163int xhci_reset(struct xhci_hcd *xhci)
164{
165 u32 command;
166 u32 state;
167 int ret, i;
168
169 state = readl(&xhci->op_regs->status);
170 if ((state & STS_HALT) == 0) {
171 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
172 return 0;
173 }
174
175 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
176 command = readl(&xhci->op_regs->command);
177 command |= CMD_RESET;
178 writel(command, &xhci->op_regs->command);
179
	/*
	 * Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
187 if (xhci->quirks & XHCI_INTEL_HOST)
188 udelay(1000);
189
190 ret = xhci_handshake(&xhci->op_regs->command,
191 CMD_RESET, 0, 10 * 1000 * 1000);
192 if (ret)
193 return ret;
194
195 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
196 "Wait for controller to be ready for doorbell rings");
197 /*
198 * xHCI cannot write to any doorbells or operational registers other
199 * than status until the "Controller Not Ready" flag is cleared.
200 */
201 ret = xhci_handshake(&xhci->op_regs->status,
202 STS_CNR, 0, 10 * 1000 * 1000);
203
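	/* Reset the suspend/resume bookkeeping for both roothubs' buses */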
204 for (i = 0; i < 2; ++i) {
205 xhci->bus_state[i].port_c_suspend = 0;
206 xhci->bus_state[i].suspended_ports = 0;
207 xhci->bus_state[i].resuming_ports = 0;
208 }
209
210 return ret;
211}
212
213#ifdef CONFIG_PCI
214static int xhci_free_msi(struct xhci_hcd *xhci)
215{
216 int i;
217
218 if (!xhci->msix_entries)
219 return -EINVAL;
220
221 for (i = 0; i < xhci->msix_count; i++)
222 if (xhci->msix_entries[i].vector)
223 free_irq(xhci->msix_entries[i].vector,
224 xhci_to_hcd(xhci));
225 return 0;
226}
227
228/*
229 * Set up MSI
230 */
231static int xhci_setup_msi(struct xhci_hcd *xhci)
232{
233 int ret;
234 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
235
236 ret = pci_enable_msi(pdev);
237 if (ret) {
238 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
239 "failed to allocate MSI entry");
240 return ret;
241 }
242
243 ret = request_irq(pdev->irq, xhci_msi_irq,
244 0, "xhci_hcd", xhci_to_hcd(xhci));
245 if (ret) {
246 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
247 "disable MSI interrupt");
248 pci_disable_msi(pdev);
249 }
250
251 return ret;
252}
253
/*
 * Free IRQs
 * Free all the IRQs that have been requested.
 */
258static void xhci_free_irq(struct xhci_hcd *xhci)
259{
260 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
261 int ret;
262
263 /* return if using legacy interrupt */
264 if (xhci_to_hcd(xhci)->irq > 0)
265 return;
266
267 ret = xhci_free_msi(xhci);
268 if (!ret)
269 return;
270 if (pdev->irq > 0)
271 free_irq(pdev->irq, xhci_to_hcd(xhci));
272
273 return;
274}
275
276/*
277 * Set up MSI-X
278 */
279static int xhci_setup_msix(struct xhci_hcd *xhci)
280{
281 int i, ret = 0;
282 struct usb_hcd *hcd = xhci_to_hcd(xhci);
283 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
284
	/*
	 * Calculate the number of MSI-X vectors supported:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host can
	 *   handle, from the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per online CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 */
292 xhci->msix_count = min(num_online_cpus() + 1,
293 HCS_MAX_INTRS(xhci->hcs_params1));
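	/* e.g. 8 online CPUs and an HCSPARAMS1 limit of 16 interrupters gives min(8 + 1, 16) == 9 vectors */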
294
	xhci->msix_entries =
		kmalloc_array(xhci->msix_count, sizeof(struct msix_entry),
			      GFP_KERNEL);
298 if (!xhci->msix_entries) {
299 xhci_err(xhci, "Failed to allocate MSI-X entries\n");
300 return -ENOMEM;
301 }
302
303 for (i = 0; i < xhci->msix_count; i++) {
304 xhci->msix_entries[i].entry = i;
305 xhci->msix_entries[i].vector = 0;
306 }
307
308 ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
309 if (ret) {
310 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
311 "Failed to enable MSI-X");
312 goto free_entries;
313 }
314
315 for (i = 0; i < xhci->msix_count; i++) {
316 ret = request_irq(xhci->msix_entries[i].vector,
317 xhci_msi_irq,
318 0, "xhci_hcd", xhci_to_hcd(xhci));
319 if (ret)
320 goto disable_msix;
321 }
322
323 hcd->msix_enabled = 1;
324 return ret;
325
326disable_msix:
327 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
328 xhci_free_irq(xhci);
329 pci_disable_msix(pdev);
330free_entries:
331 kfree(xhci->msix_entries);
332 xhci->msix_entries = NULL;
333 return ret;
334}
335
336/* Free any IRQs and disable MSI-X */
337static void xhci_cleanup_msix(struct xhci_hcd *xhci)
338{
339 struct usb_hcd *hcd = xhci_to_hcd(xhci);
340 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
341
342 if (xhci->quirks & XHCI_PLAT)
343 return;
344
345 xhci_free_irq(xhci);
346
347 if (xhci->msix_entries) {
348 pci_disable_msix(pdev);
349 kfree(xhci->msix_entries);
350 xhci->msix_entries = NULL;
351 } else {
352 pci_disable_msi(pdev);
353 }
354
355 hcd->msix_enabled = 0;
356 return;
357}
358
359static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
360{
361 int i;
362
363 if (xhci->msix_entries) {
364 for (i = 0; i < xhci->msix_count; i++)
365 synchronize_irq(xhci->msix_entries[i].vector);
366 }
367}
368
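/*
 * Interrupt setup policy: try MSI-X first, fall back to MSI if that fails,
 * and finally fall back to the legacy (INTx) interrupt from the BIOS.
 */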
369static int xhci_try_enable_msi(struct usb_hcd *hcd)
370{
371 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
372 struct pci_dev *pdev;
373 int ret;
374
375 /* The xhci platform device has set up IRQs through usb_add_hcd. */
376 if (xhci->quirks & XHCI_PLAT)
377 return 0;
378
379 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
380 /*
381 * Some Fresco Logic host controllers advertise MSI, but fail to
382 * generate interrupts. Don't even try to enable MSI.
383 */
384 if (xhci->quirks & XHCI_BROKEN_MSI)
385 goto legacy_irq;
386
387 /* unregister the legacy interrupt */
388 if (hcd->irq)
389 free_irq(hcd->irq, hcd);
390 hcd->irq = 0;
391
392 ret = xhci_setup_msix(xhci);
393 if (ret)
		/* fall back to MSI */
395 ret = xhci_setup_msi(xhci);
396
397 if (!ret)
398 /* hcd->irq is 0, we have MSI */
399 return 0;
400
401 if (!pdev->irq) {
402 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
403 return -EINVAL;
404 }
405
406 legacy_irq:
407 if (!strlen(hcd->irq_descr))
408 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
409 hcd->driver->description, hcd->self.busnum);
410
	/* fall back to the legacy interrupt */
412 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
413 hcd->irq_descr, hcd);
414 if (ret) {
415 xhci_err(xhci, "request interrupt %d failed\n",
416 pdev->irq);
417 return ret;
418 }
419 hcd->irq = pdev->irq;
420 return 0;
421}
422
423#else
424
425static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
426{
427 return 0;
428}
429
430static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
431{
432}
433
434static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
435{
436}
437
438#endif
439
440static void compliance_mode_recovery(unsigned long arg)
441{
442 struct xhci_hcd *xhci;
443 struct usb_hcd *hcd;
444 u32 temp;
445 int i;
446
447 xhci = (struct xhci_hcd *)arg;
448
449 for (i = 0; i < xhci->num_usb3_ports; i++) {
450 temp = readl(xhci->usb3_ports[i]);
451 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
452 /*
453 * Compliance Mode Detected. Letting USB Core
454 * handle the Warm Reset
455 */
456 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
457 "Compliance mode detected->port %d",
458 i + 1);
459 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
460 "Attempting compliance mode recovery");
461 hcd = xhci->shared_hcd;
462
463 if (hcd->state == HC_STATE_SUSPENDED)
464 usb_hcd_resume_root_hub(hcd);
465
466 usb_hcd_poll_rh_status(hcd);
467 }
468 }
469
470 if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
471 mod_timer(&xhci->comp_mode_recovery_timer,
472 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
473}
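/*
 * port_status_u0 has one bit per USB3 port; with e.g. four ports the timer
 * keeps re-arming until port_status_u0 == 0xf, i.e. until every port has
 * been observed in U0 (see xhci_all_ports_seen_u0() below).
 */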
474
/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers the port by issuing a Warm
 * Reset if compliance mode is detected; otherwise the port becomes "dead"
 * (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xHCI spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
485static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
486{
487 xhci->port_status_u0 = 0;
488 setup_timer(&xhci->comp_mode_recovery_timer,
489 compliance_mode_recovery, (unsigned long)xhci);
490 xhci->comp_mode_recovery_timer.expires = jiffies +
491 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
492
493 set_timer_slack(&xhci->comp_mode_recovery_timer,
494 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
495 add_timer(&xhci->comp_mode_recovery_timer);
496 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
497 "Compliance mode recovery timer initialized");
498}
499
/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
 */
506static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
507{
508 const char *dmi_product_name, *dmi_sys_vendor;
509
510 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
511 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
512 if (!dmi_product_name || !dmi_sys_vendor)
513 return false;
514
515 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
516 return false;
517
518 if (strstr(dmi_product_name, "Z420") ||
519 strstr(dmi_product_name, "Z620") ||
520 strstr(dmi_product_name, "Z820") ||
521 strstr(dmi_product_name, "Z1 Workstation"))
522 return true;
523
524 return false;
525}
526
527static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
528{
529 return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
530}
531
532
533/*
534 * Initialize memory for HCD and xHC (one-time init).
535 *
536 * Program the PAGESIZE register, initialize the device context array, create
537 * device contexts (?), set up a command ring segment (or two?), create event
538 * ring (one for now).
539 */
540int xhci_init(struct usb_hcd *hcd)
541{
542 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
543 int retval = 0;
544
545 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
546 spin_lock_init(&xhci->lock);
547 if (xhci->hci_version == 0x95 && link_quirk) {
548 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
549 "QUIRK: Not clearing Link TRB chain bits.");
550 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
551 } else {
552 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
553 "xHCI doesn't need link TRB QUIRK");
554 }
555 retval = xhci_mem_init(xhci, GFP_KERNEL);
556 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
557
	/* Initialize compliance mode recovery data if needed */
559 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
560 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
561 compliance_mode_recovery_timer_init(xhci);
562 }
563
564 return retval;
565}
566
567/*-------------------------------------------------------------------------*/
568
569
570static int xhci_run_finished(struct xhci_hcd *xhci)
571{
572 if (xhci_start(xhci)) {
573 xhci_halt(xhci);
574 return -ENODEV;
575 }
576 xhci->shared_hcd->state = HC_STATE_RUNNING;
577 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
578
579 if (xhci->quirks & XHCI_NEC_HOST)
580 xhci_ring_cmd_db(xhci);
581
582 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
583 "Finished xhci_run for USB3 roothub");
584 return 0;
585}
586
587/*
588 * Start the HC after it was halted.
589 *
590 * This function is called by the USB core when the HC driver is added.
591 * Its opposite is xhci_stop().
592 *
593 * xhci_init() must be called once before this function can be called.
594 * Reset the HC, enable device slot contexts, program DCBAAP, and
595 * set command ring pointer and event ring pointer.
596 *
 * Set up MSI-X vectors and enable interrupts.
598 */
599int xhci_run(struct usb_hcd *hcd)
600{
601 u32 temp;
602 u64 temp_64;
603 int ret;
604 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
605
	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is set up.
	 */
609
610 hcd->uses_new_polling = 1;
611 if (!usb_hcd_is_primary_hcd(hcd))
612 return xhci_run_finished(xhci);
613
614 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
615
616 ret = xhci_try_enable_msi(hcd);
617 if (ret)
618 return ret;
619
620 xhci_dbg(xhci, "Command ring memory map follows:\n");
621 xhci_debug_ring(xhci, xhci->cmd_ring);
622 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
623 xhci_dbg_cmd_ptrs(xhci);
624
625 xhci_dbg(xhci, "ERST memory map follows:\n");
626 xhci_dbg_erst(xhci, &xhci->erst);
627 xhci_dbg(xhci, "Event ring:\n");
628 xhci_debug_ring(xhci, xhci->event_ring);
629 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
630 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
631 temp_64 &= ~ERST_PTR_MASK;
632 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
633 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
634
635 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
636 "// Set the interrupt modulation register");
637 temp = readl(&xhci->ir_set->irq_control);
638 temp &= ~ER_IRQ_INTERVAL_MASK;
	/*
	 * On MTK controllers the interrupt interval unit is 8 times the one
	 * defined in the xHCI spec, so program 160 / 8 == 20 there to get an
	 * equivalent moderation interval.
	 */
643 temp |= (u32) ((xhci->quirks & XHCI_MTK_HOST) ? 20 : 160);
644 writel(temp, &xhci->ir_set->irq_control);
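	/*
	 * The interval above is in 250 ns units per the xHCI spec, so the
	 * default of 160 amounts to roughly 40 us between interrupts.
	 */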
645
646 /* Set the HCD state before we enable the irqs */
647 temp = readl(&xhci->op_regs->command);
648 temp |= (CMD_EIE);
649 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
650 "// Enable interrupts, cmd = 0x%x.", temp);
651 writel(temp, &xhci->op_regs->command);
652
653 temp = readl(&xhci->ir_set->irq_pending);
654 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
655 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
656 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
657 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
658 xhci_print_ir_set(xhci, 0);
659
660 if (xhci->quirks & XHCI_NEC_HOST) {
661 struct xhci_command *command;
662 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
663 if (!command)
664 return -ENOMEM;
665 xhci_queue_vendor_command(xhci, command, 0, 0, 0,
666 TRB_TYPE(TRB_NEC_GET_FW));
667 }
668 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
669 "Finished xhci_run for USB2 roothub");
670 return 0;
671}
672EXPORT_SYMBOL_GPL(xhci_run);
673
674/*
675 * Stop xHCI driver.
676 *
677 * This function is called by the USB core when the HC driver is removed.
678 * Its opposite is xhci_run().
679 *
680 * Disable device contexts, disable IRQs, and quiesce the HC.
681 * Reset the HC, finish any completed transactions, and cleanup memory.
682 */
683void xhci_stop(struct usb_hcd *hcd)
684{
685 u32 temp;
686 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
687
688 if (xhci->xhc_state & XHCI_STATE_HALTED)
689 return;
690
691 mutex_lock(&xhci->mutex);
692 spin_lock_irq(&xhci->lock);
693 xhci->xhc_state |= XHCI_STATE_HALTED;
694 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
695
696 /* Make sure the xHC is halted for a USB3 roothub
697 * (xhci_stop() could be called as part of failed init).
698 */
699 xhci_halt(xhci);
700 xhci_reset(xhci);
701 spin_unlock_irq(&xhci->lock);
702
703 xhci_cleanup_msix(xhci);
704
705 /* Deleting Compliance Mode Recovery Timer */
706 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
707 (!(xhci_all_ports_seen_u0(xhci)))) {
708 del_timer_sync(&xhci->comp_mode_recovery_timer);
709 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
710 "%s: compliance mode recovery timer deleted",
711 __func__);
712 }
713
714 if (xhci->quirks & XHCI_AMD_PLL_FIX)
715 usb_amd_dev_put();
716
717 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
718 "// Disabling event ring interrupts");
719 temp = readl(&xhci->op_regs->status);
720 writel(temp & ~STS_EINT, &xhci->op_regs->status);
721 temp = readl(&xhci->ir_set->irq_pending);
722 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
723 xhci_print_ir_set(xhci, 0);
724
725 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
726 xhci_mem_cleanup(xhci);
727 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
728 "xhci_stop completed - status = %x",
729 readl(&xhci->op_regs->status));
730 mutex_unlock(&xhci->mutex);
731}
732
733/*
734 * Shutdown HC (not bus-specific)
735 *
736 * This is called when the machine is rebooting or halting. We assume that the
737 * machine will be powered off, and the HC's internal state will be reset.
738 * Don't bother to free memory.
739 *
740 * This will only ever be called with the main usb_hcd (the USB3 roothub).
741 */
742void xhci_shutdown(struct usb_hcd *hcd)
743{
744 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
745
746 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
747 usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
748
749 spin_lock_irq(&xhci->lock);
750 xhci_halt(xhci);
751 /* Workaround for spurious wakeups at shutdown with HSW */
752 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
753 xhci_reset(xhci);
754 spin_unlock_irq(&xhci->lock);
755
756 xhci_cleanup_msix(xhci);
757
758 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
759 "xhci_shutdown completed - status = %x",
760 readl(&xhci->op_regs->status));
761
762 /* Yet another workaround for spurious wakeups at shutdown with HSW */
763 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
764 pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
765}
766
767#ifdef CONFIG_PM
768static void xhci_save_registers(struct xhci_hcd *xhci)
769{
770 xhci->s3.command = readl(&xhci->op_regs->command);
771 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
772 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
773 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
774 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
775 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
776 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
777 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
778 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
779}
780
781static void xhci_restore_registers(struct xhci_hcd *xhci)
782{
783 writel(xhci->s3.command, &xhci->op_regs->command);
784 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
785 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
786 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
787 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
788 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
789 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
790 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
791 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
792}
793
794static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
795{
796 u64 val_64;
797
798 /* step 2: initialize command ring buffer */
799 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
800 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
801 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
802 xhci->cmd_ring->dequeue) &
803 (u64) ~CMD_RING_RSVD_BITS) |
804 xhci->cmd_ring->cycle_state;
805 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
806 "// Setting command ring address to 0x%llx",
807 (long unsigned long) val_64);
808 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
809}
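/*
 * Note: the low-order bits of the command ring control register carry flags
 * such as the ring cycle state, which is why the dequeue pointer itself must
 * be 64-byte aligned and is masked with ~CMD_RING_RSVD_BITS above before the
 * cycle state is ORed back in.
 */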
810
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer across suspend (the
 * register is not in the suspend power well), so we need to re-program it on
 * resume. Unfortunately, the pointer must be 64-byte aligned, because of the
 * reserved bits in the command ring dequeue pointer register. Therefore, we
 * can't just set the dequeue pointer back in the middle of the ring (TRBs
 * are 16-byte aligned).
 */
820static void xhci_clear_command_ring(struct xhci_hcd *xhci)
821{
822 struct xhci_ring *ring;
823 struct xhci_segment *seg;
824
825 ring = xhci->cmd_ring;
826 seg = ring->deq_seg;
827 do {
828 memset(seg->trbs, 0,
829 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
830 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
831 cpu_to_le32(~TRB_CYCLE);
832 seg = seg->next;
833 } while (seg != ring->deq_seg);
834
835 /* Reset the software enqueue and dequeue pointers */
836 ring->deq_seg = ring->first_seg;
837 ring->dequeue = ring->first_seg->trbs;
838 ring->enq_seg = ring->deq_seg;
839 ring->enqueue = ring->dequeue;
840
841 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
842 /*
843 * Ring is now zeroed, so the HW should look for change of ownership
844 * when the cycle bit is set to 1.
845 */
846 ring->cycle_state = 1;
847
848 /*
849 * Reset the hardware dequeue pointer.
850 * Yes, this will need to be re-written after resume, but we're paranoid
851 * and want to make sure the hardware doesn't access bogus memory
852 * because, say, the BIOS or an SMI started the host without changing
853 * the command ring pointers.
854 */
855 xhci_set_cmd_ring_deq(xhci);
856}
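/*
 * For example, with 64-TRB segments (TRBS_PER_SEGMENT == 64) a one-segment
 * ring has 1 * (64 - 1) - 1 == 62 free TRBs: each segment gives up one TRB
 * to its link TRB, and one more slot is kept unused so a full ring can be
 * distinguished from an empty one.
 */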
857
858static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
859{
860 int port_index;
861 __le32 __iomem **port_array;
862 unsigned long flags;
863 u32 t1, t2;
864
865 spin_lock_irqsave(&xhci->lock, flags);
866
	/* disable the wake bits on the USB3 ports */
868 port_index = xhci->num_usb3_ports;
869 port_array = xhci->usb3_ports;
870 while (port_index--) {
871 t1 = readl(port_array[port_index]);
872 t1 = xhci_port_state_to_neutral(t1);
873 t2 = t1 & ~PORT_WAKE_BITS;
874 if (t1 != t2)
875 writel(t2, port_array[port_index]);
876 }
877
	/* disable the wake bits on the USB2 ports */
879 port_index = xhci->num_usb2_ports;
880 port_array = xhci->usb2_ports;
881 while (port_index--) {
882 t1 = readl(port_array[port_index]);
883 t1 = xhci_port_state_to_neutral(t1);
884 t2 = t1 & ~PORT_WAKE_BITS;
885 if (t1 != t2)
886 writel(t2, port_array[port_index]);
887 }
888
889 spin_unlock_irqrestore(&xhci->lock, flags);
890}
891
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
898int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
899{
900 int rc = 0;
901 unsigned int delay = XHCI_MAX_HALT_USEC;
902 struct usb_hcd *hcd = xhci_to_hcd(xhci);
903 u32 command;
904
905 if (!hcd->state)
906 return 0;
907
908 if (hcd->state != HC_STATE_SUSPENDED ||
909 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
910 return -EINVAL;
911
912 /* Clear root port wake on bits if wakeup not allowed. */
913 if (!do_wakeup)
914 xhci_disable_port_wake_on_bits(xhci);
915
916 /* Don't poll the roothubs on bus suspend. */
917 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
918 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
919 del_timer_sync(&hcd->rh_timer);
920 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
921 del_timer_sync(&xhci->shared_hcd->rh_timer);
922
923 spin_lock_irq(&xhci->lock);
924 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
925 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
926 /* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */
928
929 /* step 2: clear Run/Stop bit */
930 command = readl(&xhci->op_regs->command);
931 command &= ~CMD_RUN;
932 writel(command, &xhci->op_regs->command);
933
934 /* Some chips from Fresco Logic need an extraordinary delay */
935 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
936
937 if (xhci_handshake(&xhci->op_regs->status,
938 STS_HALT, STS_HALT, delay)) {
939 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
940 spin_unlock_irq(&xhci->lock);
941 return -ETIMEDOUT;
942 }
943 xhci_clear_command_ring(xhci);
944
945 /* step 3: save registers */
946 xhci_save_registers(xhci);
947
948 /* step 4: set CSS flag */
949 command = readl(&xhci->op_regs->command);
950 command |= CMD_CSS;
951 writel(command, &xhci->op_regs->command);
952 if (xhci_handshake(&xhci->op_regs->status,
953 STS_SAVE, 0, 10 * 1000)) {
954 xhci_warn(xhci, "WARN: xHC save state timeout\n");
955 spin_unlock_irq(&xhci->lock);
956 return -ETIMEDOUT;
957 }
958 spin_unlock_irq(&xhci->lock);
959
960 /*
961 * Deleting Compliance Mode Recovery Timer because the xHCI Host
962 * is about to be suspended.
963 */
964 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
965 (!(xhci_all_ports_seen_u0(xhci)))) {
966 del_timer_sync(&xhci->comp_mode_recovery_timer);
967 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
968 "%s: compliance mode recovery timer deleted",
969 __func__);
970 }
971
972 /* step 5: remove core well power */
973 /* synchronize irq when using MSI-X */
974 xhci_msix_sync_irqs(xhci);
975
976 return rc;
977}
978EXPORT_SYMBOL_GPL(xhci_suspend);
979
/*
 * Start the xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
986int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
987{
988 u32 command, temp = 0, status;
989 struct usb_hcd *hcd = xhci_to_hcd(xhci);
990 struct usb_hcd *secondary_hcd;
991 int retval = 0;
992 bool comp_timer_running = false;
993
994 if (!hcd->state)
995 return 0;
996
	/*
	 * Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
1000 if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
1001 time_before(jiffies,
1002 xhci->bus_state[1].next_statechange))
1003 msleep(100);
1004
1005 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
1006 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
1007
1008 spin_lock_irq(&xhci->lock);
1009 if (xhci->quirks & XHCI_RESET_ON_RESUME)
1010 hibernated = true;
1011
1012 if (!hibernated) {
1013 /* step 1: restore register */
1014 xhci_restore_registers(xhci);
1015 /* step 2: initialize command ring buffer */
1016 xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* set the CRS flag to kick off the restore */
1019 command = readl(&xhci->op_regs->command);
1020 command |= CMD_CRS;
1021 writel(command, &xhci->op_regs->command);
1022 if (xhci_handshake(&xhci->op_regs->status,
1023 STS_RESTORE, 0, 10 * 1000)) {
1024 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
1025 spin_unlock_irq(&xhci->lock);
1026 return -ETIMEDOUT;
1027 }
1028 temp = readl(&xhci->op_regs->status);
1029 }
1030
1031 /* If restore operation fails, re-initialize the HC during resume */
1032 if ((temp & STS_SRE) || hibernated) {
1033
1034 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
1035 !(xhci_all_ports_seen_u0(xhci))) {
1036 del_timer_sync(&xhci->comp_mode_recovery_timer);
1037 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1038 "Compliance Mode Recovery Timer deleted!");
1039 }
1040
1041 /* Let the USB core know _both_ roothubs lost power. */
1042 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1043 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1044
1045 xhci_dbg(xhci, "Stop HCD\n");
1046 xhci_halt(xhci);
1047 xhci_reset(xhci);
1048 spin_unlock_irq(&xhci->lock);
1049 xhci_cleanup_msix(xhci);
1050
1051 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1052 temp = readl(&xhci->op_regs->status);
1053 writel(temp & ~STS_EINT, &xhci->op_regs->status);
1054 temp = readl(&xhci->ir_set->irq_pending);
1055 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1056 xhci_print_ir_set(xhci, 0);
1057
1058 xhci_dbg(xhci, "cleaning up memory\n");
1059 xhci_mem_cleanup(xhci);
1060 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1061 readl(&xhci->op_regs->status));
1062
1063 /* USB core calls the PCI reinit and start functions twice:
1064 * first with the primary HCD, and then with the secondary HCD.
1065 * If we don't do the same, the host will never be started.
1066 */
1067 if (!usb_hcd_is_primary_hcd(hcd))
1068 secondary_hcd = hcd;
1069 else
1070 secondary_hcd = xhci->shared_hcd;
1071
1072 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1073 retval = xhci_init(hcd->primary_hcd);
1074 if (retval)
1075 return retval;
1076 comp_timer_running = true;
1077
1078 xhci_dbg(xhci, "Start the primary HCD\n");
1079 retval = xhci_run(hcd->primary_hcd);
1080 if (!retval) {
1081 xhci_dbg(xhci, "Start the secondary HCD\n");
1082 retval = xhci_run(secondary_hcd);
1083 }
1084 hcd->state = HC_STATE_SUSPENDED;
1085 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1086 goto done;
1087 }
1088
1089 /* step 4: set Run/Stop bit */
1090 command = readl(&xhci->op_regs->command);
1091 command |= CMD_RUN;
1092 writel(command, &xhci->op_regs->command);
1093 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1094 0, 250 * 1000);
1095
1096 /* step 5: walk topology and initialize portsc,
1097 * portpmsc and portli
1098 */
1099 /* this is done in bus_resume */
1100
1101 /* step 6: restart each of the previously
1102 * Running endpoints by ringing their doorbells
1103 */
1104
1105 spin_unlock_irq(&xhci->lock);
1106
1107 done:
1108 if (retval == 0) {
		/* Resume the root hubs only if there are pending events. */
1110 status = readl(&xhci->op_regs->status);
1111 if (status & STS_EINT) {
1112 usb_hcd_resume_root_hub(xhci->shared_hcd);
1113 usb_hcd_resume_root_hub(hcd);
1114 }
1115 }
1116
	/*
	 * If the system is subject to the quirk, the compliance mode timer
	 * always needs to be re-initialized after a system resume, since the
	 * ports may suffer the compliance mode issue again. This applies even
	 * if the ports already entered U0 before the system was suspended.
	 */
1123 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1124 compliance_mode_recovery_timer_init(xhci);
1125
1126 /* Re-enable port polling. */
1127 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1128 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1129 usb_hcd_poll_rh_status(xhci->shared_hcd);
1130 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1131 usb_hcd_poll_rh_status(hcd);
1132
1133 return retval;
1134}
1135EXPORT_SYMBOL_GPL(xhci_resume);
1136#endif /* CONFIG_PM */
1137
1138/*-------------------------------------------------------------------------*/
1139
/**
 * xhci_get_endpoint_index - Find the xHCI endpoint index for an endpoint
 * @desc: the descriptor of the endpoint
 *
 * Used for passing endpoint bitmasks between the core and HCDs. Find the
 * index for an endpoint given its descriptor. Use the return value to right
 * shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
1150unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1151{
1152 unsigned int index;
1153 if (usb_endpoint_xfer_control(desc))
1154 index = (unsigned int) (usb_endpoint_num(desc)*2);
1155 else
1156 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1157 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1158 return index;
1159}
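/*
 * Examples: ep 1 OUT -> (1 * 2) + 0 - 1 == 1; ep 1 IN -> (1 * 2) + 1 - 1 == 2;
 * the default control endpoint (ep 0) gets index 0.
 */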
1160
1161/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
1162 * address from the XHCI endpoint index.
1163 */
1164unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1165{
1166 unsigned int number = DIV_ROUND_UP(ep_index, 2);
1167 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1168 return direction | number;
1169}
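/* Example: ep_index 2 -> DIV_ROUND_UP(2, 2) == 1, 2 % 2 == 0 -> USB_DIR_IN, giving address 0x81 (ep 1 IN) */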
1170
1171/* Find the flag for this endpoint (for use in the control context). Use the
1172 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1173 * bit 1, etc.
1174 */
1175unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1176{
1177 return 1 << (xhci_get_endpoint_index(desc) + 1);
1178}
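/* Example: ep 1 IN has endpoint index 2, so its context flag is 1 << (2 + 1) == 0b1000 */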
1179
1180/* Find the flag for this endpoint (for use in the control context). Use the
1181 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1182 * bit 1, etc.
1183 */
1184unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1185{
1186 return 1 << (ep_index + 1);
1187}
1188
/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with endpoint address 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
1195unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1196{
1197 return fls(added_ctxs) - 1;
1198}
1199
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
1203static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1204 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1205 const char *func) {
1206 struct xhci_hcd *xhci;
1207 struct xhci_virt_device *virt_dev;
1208
1209 if (!hcd || (check_ep && !ep) || !udev) {
1210 pr_debug("xHCI %s called with invalid args\n", func);
1211 return -EINVAL;
1212 }
1213 if (!udev->parent) {
1214 pr_debug("xHCI %s called for root hub\n", func);
1215 return 0;
1216 }
1217
1218 xhci = hcd_to_xhci(hcd);
1219 if (check_virt_dev) {
1220 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1221 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1222 func);
1223 return -EINVAL;
1224 }
1225
1226 virt_dev = xhci->devs[udev->slot_id];
1227 if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with a udev that does not match the virt_dev\n",
					func);
1230 return -EINVAL;
1231 }
1232 }
1233
1234 if (xhci->xhc_state & XHCI_STATE_HALTED)
1235 return -ENODEV;
1236
1237 return 1;
1238}
1239
1240static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1241 struct usb_device *udev, struct xhci_command *command,
1242 bool ctx_change, bool must_succeed);
1243
1244/*
1245 * Full speed devices may have a max packet size greater than 8 bytes, but the
1246 * USB core doesn't know that until it reads the first 8 bytes of the
1247 * descriptor. If the usb_device's max packet size changes after that point,
1248 * we need to issue an evaluate context command and wait on it.
1249 */
1250static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1251 unsigned int ep_index, struct urb *urb)
1252{
1253 struct xhci_container_ctx *out_ctx;
1254 struct xhci_input_control_ctx *ctrl_ctx;
1255 struct xhci_ep_ctx *ep_ctx;
1256 struct xhci_command *command;
1257 int max_packet_size;
1258 int hw_max_packet_size;
1259 int ret = 0;
1260
1261 out_ctx = xhci->devs[slot_id]->out_ctx;
1262 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1263 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1264 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1265 if (hw_max_packet_size != max_packet_size) {
1266 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1267 "Max Packet Size for ep 0 changed.");
1268 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1269 "Max packet size in usb_device = %d",
1270 max_packet_size);
1271 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1272 "Max packet size in xHCI HW = %d",
1273 hw_max_packet_size);
1274 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1275 "Issuing evaluate context command.");
1276
1277 /* Set up the input context flags for the command */
1278 /* FIXME: This won't work if a non-default control endpoint
1279 * changes max packet sizes.
1280 */
1281
1282 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
1283 if (!command)
1284 return -ENOMEM;
1285
1286 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1287 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
1288 if (!ctrl_ctx) {
1289 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1290 __func__);
1291 ret = -ENOMEM;
1292 goto command_cleanup;
1293 }
1294 /* Set up the modified control endpoint 0 */
1295 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1296 xhci->devs[slot_id]->out_ctx, ep_index);
1297
1298 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1299 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1300 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1301
1302 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1303 ctrl_ctx->drop_flags = 0;
1304
1305 xhci_dbg(xhci, "Slot %d input context\n", slot_id);
1306 xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
1307 xhci_dbg(xhci, "Slot %d output context\n", slot_id);
1308 xhci_dbg_ctx(xhci, out_ctx, ep_index);
1309
1310 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1311 true, false);
1312
1313 /* Clean up the input context for later use by bandwidth
1314 * functions.
1315 */
1316 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1317command_cleanup:
1318 kfree(command->completion);
1319 kfree(command);
1320 }
1321 return ret;
1322}
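/*
 * Note: a full-speed device may legally use an ep0 max packet size of 8, 16,
 * 32 or 64 bytes, but the slot is first set up from just the initial 8 bytes
 * of the device descriptor, hence the fix-up above.
 */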
1323
/*
 * Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
 */
1328int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1329{
1330 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1331 struct xhci_td *buffer;
1332 unsigned long flags;
1333 int ret = 0;
1334 unsigned int slot_id, ep_index;
1335 struct urb_priv *urb_priv;
1336 int size, i;
1337
1338 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1339 true, true, __func__) <= 0)
1340 return -EINVAL;
1341
1342 slot_id = urb->dev->slot_id;
1343 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1344
1345 if (!HCD_HW_ACCESSIBLE(hcd)) {
1346 if (!in_interrupt())
1347 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1348 ret = -ESHUTDOWN;
1349 goto exit;
1350 }
1351
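	/*
	 * Work out how many TDs this urb needs: one TD per packet for
	 * isochronous transfers, two for a bulk OUT that needs a trailing
	 * zero-length packet, and one for everything else.
	 */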
1352 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1353 size = urb->number_of_packets;
1354 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1355 urb->transfer_buffer_length > 0 &&
1356 urb->transfer_flags & URB_ZERO_PACKET &&
1357 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1358 size = 2;
1359 else
1360 size = 1;
1361
1362 urb_priv = kzalloc(sizeof(struct urb_priv) +
1363 size * sizeof(struct xhci_td *), mem_flags);
1364 if (!urb_priv)
1365 return -ENOMEM;
1366
1367 buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
1368 if (!buffer) {
1369 kfree(urb_priv);
1370 return -ENOMEM;
1371 }
1372
1373 for (i = 0; i < size; i++) {
1374 urb_priv->td[i] = buffer;
1375 buffer++;
1376 }
1377
1378 urb_priv->length = size;
1379 urb_priv->td_cnt = 0;
1380 urb->hcpriv = urb_priv;
1381
1382 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1383 /* Check to see if the max packet size for the default control
1384 * endpoint changed during FS device enumeration
1385 */
1386 if (urb->dev->speed == USB_SPEED_FULL) {
1387 ret = xhci_check_maxpacket(xhci, slot_id,
1388 ep_index, urb);
1389 if (ret < 0) {
1390 xhci_urb_free_priv(urb_priv);
1391 urb->hcpriv = NULL;
1392 return ret;
1393 }
1394 }
1395
1396 /* We have a spinlock and interrupts disabled, so we must pass
1397 * atomic context to this function, which may allocate memory.
1398 */
1399 spin_lock_irqsave(&xhci->lock, flags);
1400 if (xhci->xhc_state & XHCI_STATE_DYING)
1401 goto dying;
1402 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1403 slot_id, ep_index);
1404 if (ret)
1405 goto free_priv;
1406 spin_unlock_irqrestore(&xhci->lock, flags);
1407 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1408 spin_lock_irqsave(&xhci->lock, flags);
1409 if (xhci->xhc_state & XHCI_STATE_DYING)
1410 goto dying;
1411 if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1412 EP_GETTING_STREAMS) {
1413 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1414 "is transitioning to using streams.\n");
1415 ret = -EINVAL;
1416 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1417 EP_GETTING_NO_STREAMS) {
1418 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1419 "is transitioning to "
1420 "not having streams.\n");
1421 ret = -EINVAL;
1422 } else {
1423 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1424 slot_id, ep_index);
1425 }
1426 if (ret)
1427 goto free_priv;
1428 spin_unlock_irqrestore(&xhci->lock, flags);
1429 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1430 spin_lock_irqsave(&xhci->lock, flags);
1431 if (xhci->xhc_state & XHCI_STATE_DYING)
1432 goto dying;
1433 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1434 slot_id, ep_index);
1435 if (ret)
1436 goto free_priv;
1437 spin_unlock_irqrestore(&xhci->lock, flags);
1438 } else {
1439 spin_lock_irqsave(&xhci->lock, flags);
1440 if (xhci->xhc_state & XHCI_STATE_DYING)
1441 goto dying;
1442 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1443 slot_id, ep_index);
1444 if (ret)
1445 goto free_priv;
1446 spin_unlock_irqrestore(&xhci->lock, flags);
1447 }
1448exit:
1449 return ret;
1450dying:
1451 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1452 "non-responsive xHCI host.\n",
1453 urb->ep->desc.bEndpointAddress, urb);
1454 ret = -ESHUTDOWN;
1455free_priv:
1456 xhci_urb_free_priv(urb_priv);
1457 urb->hcpriv = NULL;
1458 spin_unlock_irqrestore(&xhci->lock, flags);
1459 return ret;
1460}
1461
1462/* Get the right ring for the given URB.
1463 * If the endpoint supports streams, boundary check the URB's stream ID.
1464 * If the endpoint doesn't support streams, return the singular endpoint ring.
1465 */
1466static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1467 struct urb *urb)
1468{
1469 unsigned int slot_id;
1470 unsigned int ep_index;
1471 unsigned int stream_id;
1472 struct xhci_virt_ep *ep;
1473
1474 slot_id = urb->dev->slot_id;
1475 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1476 stream_id = urb->stream_id;
1477 ep = &xhci->devs[slot_id]->eps[ep_index];
1478 /* Common case: no streams */
1479 if (!(ep->ep_state & EP_HAS_STREAMS))
1480 return ep->ring;
1481
1482 if (stream_id == 0) {
1483 xhci_warn(xhci,
1484 "WARN: Slot ID %u, ep index %u has streams, "
1485 "but URB has no stream ID.\n",
1486 slot_id, ep_index);
1487 return NULL;
1488 }
1489
1490 if (stream_id < ep->stream_info->num_streams)
1491 return ep->stream_info->stream_rings[stream_id];
1492
1493 xhci_warn(xhci,
1494 "WARN: Slot ID %u, ep index %u has "
1495 "stream IDs 1 to %u allocated, "
1496 "but stream ID %u is requested.\n",
1497 slot_id, ep_index,
1498 ep->stream_info->num_streams - 1,
1499 stream_id);
1500 return NULL;
1501}
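/*
 * For example, an endpoint configured with num_streams == 4 has valid stream
 * IDs 1 to 3 (stream ID 0 is reserved), so a urb with stream_id 4 is
 * rejected above.
 */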
1502
/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring. Since the ring is a contiguous structure, they can't be
 * physically removed. Instead, there are three cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers time out on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to
 * complete. It also needs to account for multiple cancellations happening at
 * the same time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
1534int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1535{
1536 unsigned long flags;
1537 int ret, i;
1538 u32 temp;
1539 struct xhci_hcd *xhci;
1540 struct urb_priv *urb_priv;
1541 struct xhci_td *td;
1542 unsigned int ep_index;
1543 struct xhci_ring *ep_ring;
1544 struct xhci_virt_ep *ep;
1545 struct xhci_command *command;
1546
1547 xhci = hcd_to_xhci(hcd);
1548 spin_lock_irqsave(&xhci->lock, flags);
1549 /* Make sure the URB hasn't completed or been unlinked already */
1550 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1551 if (ret || !urb->hcpriv)
1552 goto done;
1553 temp = readl(&xhci->op_regs->status);
1554 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1555 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1556 "HW died, freeing TD.");
1557 urb_priv = urb->hcpriv;
1558 for (i = urb_priv->td_cnt;
1559 i < urb_priv->length && xhci->devs[urb->dev->slot_id];
1560 i++) {
1561 td = urb_priv->td[i];
1562 if (!list_empty(&td->td_list))
1563 list_del_init(&td->td_list);
1564 if (!list_empty(&td->cancelled_td_list))
1565 list_del_init(&td->cancelled_td_list);
1566 }
1567
1568 usb_hcd_unlink_urb_from_ep(hcd, urb);
1569 spin_unlock_irqrestore(&xhci->lock, flags);
1570 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1571 xhci_urb_free_priv(urb_priv);
1572 return ret;
1573 }
1574 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1575 (xhci->xhc_state & XHCI_STATE_HALTED)) {
1576 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1577 "Ep 0x%x: URB %p to be canceled on "
1578 "non-responsive xHCI host.",
1579 urb->ep->desc.bEndpointAddress, urb);
1580 /* Let the stop endpoint command watchdog timer (which set this
1581 * state) finish cleaning up the endpoint TD lists. We must
1582 * have caught it in the middle of dropping a lock and giving
1583 * back an URB.
1584 */
1585 goto done;
1586 }
1587
1588 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1589 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1590 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1591 if (!ep_ring) {
1592 ret = -EINVAL;
1593 goto done;
1594 }
1595
1596 urb_priv = urb->hcpriv;
1597 i = urb_priv->td_cnt;
1598 if (i < urb_priv->length)
1599 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1600 "Cancel URB %p, dev %s, ep 0x%x, "
1601 "starting at offset 0x%llx",
1602 urb, urb->dev->devpath,
1603 urb->ep->desc.bEndpointAddress,
1604 (unsigned long long) xhci_trb_virt_to_dma(
1605 urb_priv->td[i]->start_seg,
1606 urb_priv->td[i]->first_trb));
1607
1608 for (; i < urb_priv->length; i++) {
1609 td = urb_priv->td[i];
1610 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1611 }
1612
1613 /* Queue a stop endpoint command, but only if this is
1614 * the first cancellation to be handled.
1615 */
1616 if (!(ep->ep_state & EP_HALT_PENDING)) {
1617 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1618 if (!command) {
1619 ret = -ENOMEM;
1620 goto done;
1621 }
1622 ep->ep_state |= EP_HALT_PENDING;
1623 ep->stop_cmds_pending++;
1624 ep->stop_cmd_timer.expires = jiffies +
1625 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1626 add_timer(&ep->stop_cmd_timer);
1627 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1628 ep_index, 0);
1629 xhci_ring_cmd_db(xhci);
1630 }
1631done:
1632 spin_unlock_irqrestore(&xhci->lock, flags);
1633 return ret;
1634}
1635
1636/* Drop an endpoint from a new bandwidth configuration for this device.
1637 * Only one call to this function is allowed per endpoint before
1638 * check_bandwidth() or reset_bandwidth() must be called.
1639 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1640 * add the endpoint to the schedule with possibly new parameters denoted by a
1641 * different endpoint descriptor in usb_host_endpoint.
1642 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1643 * not allowed.
1644 *
1645 * The USB core will not allow URBs to be queued to an endpoint that is being
1646 * disabled, so there's no need for mutual exclusion to protect
1647 * the xhci->devs[slot_id] structure.
1648 */
1649int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1650 struct usb_host_endpoint *ep)
1651{
1652 struct xhci_hcd *xhci;
1653 struct xhci_container_ctx *in_ctx, *out_ctx;
1654 struct xhci_input_control_ctx *ctrl_ctx;
1655 unsigned int ep_index;
1656 struct xhci_ep_ctx *ep_ctx;
1657 u32 drop_flag;
1658 u32 new_add_flags, new_drop_flags;
1659 int ret;
1660
1661 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1662 if (ret <= 0)
1663 return ret;
1664 xhci = hcd_to_xhci(hcd);
1665 if (xhci->xhc_state & XHCI_STATE_DYING)
1666 return -ENODEV;
1667
1668 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1669 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1670 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1671 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1672 __func__, drop_flag);
1673 return 0;
1674 }
1675
1676 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1677 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1678 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1679 if (!ctrl_ctx) {
1680 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1681 __func__);
1682 return 0;
1683 }
1684
1685 ep_index = xhci_get_endpoint_index(&ep->desc);
1686 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1687 /* If the HC already knows the endpoint is disabled,
1688 * or the HCD has noted it is disabled, ignore this request
1689 */
1690 if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1691 cpu_to_le32(EP_STATE_DISABLED)) ||
1692 le32_to_cpu(ctrl_ctx->drop_flags) &
1693 xhci_get_endpoint_flag(&ep->desc)) {
1694 /* Do not warn when called after a usb_device_reset */
1695 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1696 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1697 __func__, ep);
1698 return 0;
1699 }
1700
1701 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1702 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1703
1704 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1705 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1706
1707 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1708
1709 if (xhci->quirks & XHCI_MTK_HOST)
1710 xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1711
1712 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1713 (unsigned int) ep->desc.bEndpointAddress,
1714 udev->slot_id,
1715 (unsigned int) new_drop_flags,
1716 (unsigned int) new_add_flags);
1717 return 0;
1718}
1719
1720/* Add an endpoint to a new possible bandwidth configuration for this device.
1721 * Only one call to this function is allowed per endpoint before
1722 * check_bandwidth() or reset_bandwidth() must be called.
1723 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1724 * add the endpoint to the schedule with possibly new parameters denoted by a
1725 * different endpoint descriptor in usb_host_endpoint.
1726 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1727 * not allowed.
1728 *
1729 * The USB core will not allow URBs to be queued to an endpoint until the
1730 * configuration or alt setting is installed in the device, so there's no need
1731 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1732 */
1733int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1734 struct usb_host_endpoint *ep)
1735{
1736 struct xhci_hcd *xhci;
1737 struct xhci_container_ctx *in_ctx;
1738 unsigned int ep_index;
1739 struct xhci_input_control_ctx *ctrl_ctx;
1740 u32 added_ctxs;
1741 u32 new_add_flags, new_drop_flags;
1742 struct xhci_virt_device *virt_dev;
1743 int ret = 0;
1744
1745 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1746 if (ret <= 0) {
1747 /* So we won't queue a reset ep command for a root hub */
1748 ep->hcpriv = NULL;
1749 return ret;
1750 }
1751 xhci = hcd_to_xhci(hcd);
1752 if (xhci->xhc_state & XHCI_STATE_DYING)
1753 return -ENODEV;
1754
1755 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1756 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1757 /* FIXME when we have to issue an evaluate endpoint command to
1758 * deal with ep0 max packet size changing once we get the
1759 * descriptors
1760 */
1761 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1762 __func__, added_ctxs);
1763 return 0;
1764 }
1765
1766 virt_dev = xhci->devs[udev->slot_id];
1767 in_ctx = virt_dev->in_ctx;
1768 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1769 if (!ctrl_ctx) {
1770 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1771 __func__);
1772 return 0;
1773 }
1774
1775 ep_index = xhci_get_endpoint_index(&ep->desc);
1776 /* If this endpoint is already in use, and the upper layers are trying
1777 * to add it again without dropping it, reject the addition.
1778 */
1779 if (virt_dev->eps[ep_index].ring &&
1780 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1781 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1782 "without dropping it.\n",
1783 (unsigned int) ep->desc.bEndpointAddress);
1784 return -EINVAL;
1785 }
1786
1787 /* If the HCD has already noted the endpoint is enabled,
1788 * ignore this request.
1789 */
1790 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1791 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1792 __func__, ep);
1793 return 0;
1794 }
1795
	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so the documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
	 */
1801 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1802 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1803 __func__, ep->desc.bEndpointAddress);
1804 return -ENOMEM;
1805 }
1806
1807 if (xhci->quirks & XHCI_MTK_HOST) {
1808 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1809 if (ret < 0) {
1810 xhci_free_or_cache_endpoint_ring(xhci,
1811 virt_dev, ep_index);
1812 return ret;
1813 }
1814 }
1815
1816 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1817 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1818
1819 /* If xhci_endpoint_disable() was called for this endpoint, but the
1820 * xHC hasn't been notified yet through the check_bandwidth() call,
1821 * this re-adds a new state for the endpoint from the new endpoint
1822 * descriptors. We must drop and re-add this endpoint, so we leave the
1823 * drop flags alone.
1824 */
1825 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1826
1827 /* Store the usb_device pointer for later use */
1828 ep->hcpriv = udev;
1829
1830 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1831 (unsigned int) ep->desc.bEndpointAddress,
1832 udev->slot_id,
1833 (unsigned int) new_drop_flags,
1834 (unsigned int) new_add_flags);
1835 return 0;
1836}
1837
1838static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1839{
1840 struct xhci_input_control_ctx *ctrl_ctx;
1841 struct xhci_ep_ctx *ep_ctx;
1842 struct xhci_slot_ctx *slot_ctx;
1843 int i;
1844
1845 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1846 if (!ctrl_ctx) {
1847 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1848 __func__);
1849 return;
1850 }
1851
1852 /* When a device's add flag and drop flag are zero, any subsequent
1853 * configure endpoint command will leave that endpoint's state
1854 * untouched. Make sure we don't leave any old state in the input
1855 * endpoint contexts.
1856 */
1857 ctrl_ctx->drop_flags = 0;
1858 ctrl_ctx->add_flags = 0;
1859 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1860 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1861 /* Endpoint 0 is always valid */
1862 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1863 for (i = 1; i < 31; ++i) {
1864 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1865 ep_ctx->ep_info = 0;
1866 ep_ctx->ep_info2 = 0;
1867 ep_ctx->deq = 0;
1868 ep_ctx->tx_info = 0;
1869 }
1870}
1871
1872static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1873 struct usb_device *udev, u32 *cmd_status)
1874{
1875 int ret;
1876
1877 switch (*cmd_status) {
1878 case COMP_CMD_ABORT:
1879 case COMP_CMD_STOP:
1880 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1881 ret = -ETIME;
1882 break;
1883 case COMP_ENOMEM:
1884 dev_warn(&udev->dev,
1885 "Not enough host controller resources for new device state.\n");
1886 ret = -ENOMEM;
1887 /* FIXME: can we allocate more resources for the HC? */
1888 break;
1889 case COMP_BW_ERR:
1890 case COMP_2ND_BW_ERR:
1891 dev_warn(&udev->dev,
1892 "Not enough bandwidth for new device state.\n");
1893 ret = -ENOSPC;
1894 /* FIXME: can we go back to the old state? */
1895 break;
1896 case COMP_TRB_ERR:
1897 /* the HCD set up something wrong */
1898 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1899 "add flag = 1, "
1900 "and endpoint is not disabled.\n");
1901 ret = -EINVAL;
1902 break;
1903 case COMP_DEV_ERR:
1904 dev_warn(&udev->dev,
1905 "ERROR: Incompatible device for endpoint configure command.\n");
1906 ret = -ENODEV;
1907 break;
1908 case COMP_SUCCESS:
1909 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1910 "Successful Endpoint Configure command");
1911 ret = 0;
1912 break;
1913 default:
1914 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1915 *cmd_status);
1916 ret = -EINVAL;
1917 break;
1918 }
1919 return ret;
1920}
1921
1922static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1923 struct usb_device *udev, u32 *cmd_status)
1924{
1925 int ret;
1926 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1927
1928 switch (*cmd_status) {
1929 case COMP_CMD_ABORT:
1930 case COMP_CMD_STOP:
1931 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
1932 ret = -ETIME;
1933 break;
1934 case COMP_EINVAL:
1935 dev_warn(&udev->dev,
1936 "WARN: xHCI driver setup invalid evaluate context command.\n");
1937 ret = -EINVAL;
1938 break;
1939 case COMP_EBADSLT:
1940 dev_warn(&udev->dev,
1941 "WARN: slot not enabled for evaluate context command.\n");
1942 ret = -EINVAL;
1943 break;
1944 case COMP_CTX_STATE:
1945 dev_warn(&udev->dev,
1946 "WARN: invalid context state for evaluate context command.\n");
1947 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1948 ret = -EINVAL;
1949 break;
1950 case COMP_DEV_ERR:
1951 dev_warn(&udev->dev,
1952 "ERROR: Incompatible device for evaluate context command.\n");
1953 ret = -ENODEV;
1954 break;
1955 case COMP_MEL_ERR:
1956 /* Max Exit Latency too large error */
1957 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1958 ret = -EINVAL;
1959 break;
1960 case COMP_SUCCESS:
1961 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1962 "Successful evaluate context command");
1963 ret = 0;
1964 break;
1965 default:
1966 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1967 *cmd_status);
1968 ret = -EINVAL;
1969 break;
1970 }
1971 return ret;
1972}
1973
1974static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1975 struct xhci_input_control_ctx *ctrl_ctx)
1976{
1977 u32 valid_add_flags;
1978 u32 valid_drop_flags;
1979
1980 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1981 * (bit 1). The default control endpoint is added during the Address
1982 * Device command and is never removed until the slot is disabled.
1983 */
1984 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1985 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1986
1987 /* Use hweight32 to count the number of ones in the add flags, or
1988 * number of endpoints added. Don't count endpoints that are changed
1989 * (both added and dropped).
1990 */
1991 return hweight32(valid_add_flags) -
1992 hweight32(valid_add_flags & valid_drop_flags);
1993}
1994
1995static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1996 struct xhci_input_control_ctx *ctrl_ctx)
1997{
1998 u32 valid_add_flags;
1999 u32 valid_drop_flags;
2000
2001 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2002 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2003
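	/* Mirror of xhci_count_num_new_endpoints(): endpoints that are changed
	 * (both dropped and re-added) don't count as dropped.
	 */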
2004 return hweight32(valid_drop_flags) -
2005 hweight32(valid_add_flags & valid_drop_flags);
2006}
2007
2008/*
2009 * We need to reserve the new number of endpoints before the configure endpoint
2010 * command completes. We can't subtract the dropped endpoints from the number
2011 * of active endpoints until the command completes because we can oversubscribe
2012 * the host in this case:
2013 *
2014 * - the first configure endpoint command drops more endpoints than it adds
2015 * - a second configure endpoint command that adds more endpoints is queued
2016 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
2018 *
2019 * Must be called with xhci->lock held.
2020 */
2021static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2022 struct xhci_input_control_ctx *ctrl_ctx)
2023{
2024 u32 added_eps;
2025
2026 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2027 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2028 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2029 "Not enough ep ctxs: "
2030 "%u active, need to add %u, limit is %u.",
2031 xhci->num_active_eps, added_eps,
2032 xhci->limit_active_eps);
2033 return -ENOMEM;
2034 }
2035 xhci->num_active_eps += added_eps;
2036 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2037 "Adding %u ep ctxs, %u now active.", added_eps,
2038 xhci->num_active_eps);
2039 return 0;
2040}
2041
2042/*
 * The configure endpoint command failed in the xHC for some reason, so we
 * need to revert the resources that the failed configuration would have used.
2045 *
2046 * Must be called with xhci->lock held.
2047 */
2048static void xhci_free_host_resources(struct xhci_hcd *xhci,
2049 struct xhci_input_control_ctx *ctrl_ctx)
2050{
2051 u32 num_failed_eps;
2052
2053 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2054 xhci->num_active_eps -= num_failed_eps;
2055 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2056 "Removing %u failed ep ctxs, %u now active.",
2057 num_failed_eps,
2058 xhci->num_active_eps);
2059}
2060
2061/*
2062 * Now that the command has completed, clean up the active endpoint count by
2063 * subtracting out the endpoints that were dropped (but not changed).
2064 *
2065 * Must be called with xhci->lock held.
2066 */
2067static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2068 struct xhci_input_control_ctx *ctrl_ctx)
2069{
2070 u32 num_dropped_eps;
2071
2072 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2073 xhci->num_active_eps -= num_dropped_eps;
2074 if (num_dropped_eps)
2075 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2076 "Removing %u dropped ep ctxs, %u now active.",
2077 num_dropped_eps,
2078 xhci->num_active_eps);
2079}
2080
2081static unsigned int xhci_get_block_size(struct usb_device *udev)
2082{
2083 switch (udev->speed) {
2084 case USB_SPEED_LOW:
2085 case USB_SPEED_FULL:
2086 return FS_BLOCK;
2087 case USB_SPEED_HIGH:
2088 return HS_BLOCK;
2089 case USB_SPEED_SUPER:
2090 case USB_SPEED_SUPER_PLUS:
2091 return SS_BLOCK;
2092 case USB_SPEED_UNKNOWN:
2093 case USB_SPEED_WIRELESS:
2094 default:
2095 /* Should never happen */
2096 return 1;
2097 }
2098}
2099
2100static unsigned int
2101xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2102{
2103 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2104 return LS_OVERHEAD;
2105 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2106 return FS_OVERHEAD;
2107 return HS_OVERHEAD;
2108}
2109
/* If we are changing an LS/FS device under an HS hub,
2111 * make sure (if we are activating a new TT) that the HS bus has enough
2112 * bandwidth for this new TT.
2113 */
2114static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2115 struct xhci_virt_device *virt_dev,
2116 int old_active_eps)
2117{
2118 struct xhci_interval_bw_table *bw_table;
2119 struct xhci_tt_bw_info *tt_info;
2120
2121 /* Find the bandwidth table for the root port this TT is attached to. */
2122 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2123 tt_info = virt_dev->tt_info;
2124 /* If this TT already had active endpoints, the bandwidth for this TT
2125 * has already been added. Removing all periodic endpoints (and thus
 * making the TT inactive) will only decrease the bandwidth used.
2127 */
2128 if (old_active_eps)
2129 return 0;
2130 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2131 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2132 return -ENOMEM;
2133 return 0;
2134 }
2135 /* Not sure why we would have no new active endpoints...
2136 *
2137 * Maybe because of an Evaluate Context change for a hub update or a
2138 * control endpoint 0 max packet size change?
2139 * FIXME: skip the bandwidth calculation in that case.
2140 */
2141 return 0;
2142}
2143
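/* Keep SS_BW_RESERVED percent of the SuperSpeed budget in reserve: with a
 * hypothetical 10 percent reservation, the IN and OUT directions may each
 * use at most 90 percent of SS_BW_LIMIT_IN and SS_BW_LIMIT_OUT respectively.
 */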
2144static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2145 struct xhci_virt_device *virt_dev)
2146{
2147 unsigned int bw_reserved;
2148
2149 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2150 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2151 return -ENOMEM;
2152
2153 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2154 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2155 return -ENOMEM;
2156
2157 return 0;
2158}
2159
2160/*
2161 * This algorithm is a very conservative estimate of the worst-case scheduling
2162 * scenario for any one interval. The hardware dynamically schedules the
2163 * packets, so we can't tell which microframe could be the limiting factor in
2164 * the bandwidth scheduling. This only takes into account periodic endpoints.
2165 *
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2167 * case scenario. Instead, we come up with an estimate that is no less than
2168 * the worst case bandwidth used for any one microframe, but may be an
2169 * over-estimate.
2170 *
2171 * We walk the requirements for each endpoint by interval, starting with the
2172 * smallest interval, and place packets in the schedule where there is only one
2173 * possible way to schedule packets for that interval. In order to simplify
2174 * this algorithm, we record the largest max packet size for each interval, and
2175 * assume all packets will be that size.
2176 *
 * For interval 0, we must obviously schedule all packets in every microframe.
2178 * The bandwidth for interval 0 is just the amount of data to be transmitted
2179 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2180 * the number of packets).
2181 *
2182 * For interval 1, we have two possible microframes to schedule those packets
2183 * in. For this algorithm, if we can schedule the same number of packets for
2184 * each possible scheduling opportunity (each microframe), we will do so. The
2185 * remaining number of packets will be saved to be transmitted in the gaps in
2186 * the next interval's scheduling sequence.
2187 *
2188 * As we move those remaining packets to be scheduled with interval 2 packets,
2189 * we have to double the number of remaining packets to transmit. This is
2190 * because the intervals are actually powers of 2, and we would be transmitting
2191 * the previous interval's packets twice in this interval. We also have to be
2192 * sure that when we look at the largest max packet size for this interval, we
2193 * also look at the largest max packet size for the remaining packets and take
2194 * the greater of the two.
2195 *
2196 * The algorithm continues to evenly distribute packets in each scheduling
2197 * opportunity, and push the remaining packets out, until we get to the last
2198 * interval. Then those packets and their associated overhead are just added
2199 * to the bandwidth used.
2200 */
2201static int xhci_check_bw_table(struct xhci_hcd *xhci,
2202 struct xhci_virt_device *virt_dev,
2203 int old_active_eps)
2204{
2205 unsigned int bw_reserved;
2206 unsigned int max_bandwidth;
2207 unsigned int bw_used;
2208 unsigned int block_size;
2209 struct xhci_interval_bw_table *bw_table;
2210 unsigned int packet_size = 0;
2211 unsigned int overhead = 0;
2212 unsigned int packets_transmitted = 0;
2213 unsigned int packets_remaining = 0;
2214 unsigned int i;
2215
2216 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2217 return xhci_check_ss_bw(xhci, virt_dev);
2218
2219 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2220 max_bandwidth = HS_BW_LIMIT;
2221 /* Convert percent of bus BW reserved to blocks reserved */
2222 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2223 } else {
2224 max_bandwidth = FS_BW_LIMIT;
2225 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2226 }
2227
2228 bw_table = virt_dev->bw_table;
2229 /* We need to translate the max packet size and max ESIT payloads into
2230 * the units the hardware uses.
2231 */
2232 block_size = xhci_get_block_size(virt_dev->udev);
2233
	/* If we are manipulating an LS/FS device under an HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
2236 */
2237 if (virt_dev->tt_info) {
2238 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2239 "Recalculating BW for rootport %u",
2240 virt_dev->real_port);
2241 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2242 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2243 "newly activated TT.\n");
2244 return -ENOMEM;
2245 }
2246 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2247 "Recalculating BW for TT slot %u port %u",
2248 virt_dev->tt_info->slot_id,
2249 virt_dev->tt_info->ttport);
2250 } else {
2251 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2252 "Recalculating BW for rootport %u",
2253 virt_dev->real_port);
2254 }
2255
2256 /* Add in how much bandwidth will be used for interval zero, or the
2257 * rounded max ESIT payload + number of packets * largest overhead.
2258 */
2259 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2260 bw_table->interval_bw[0].num_packets *
2261 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2262
2263 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2264 unsigned int bw_added;
2265 unsigned int largest_mps;
2266 unsigned int interval_overhead;
2267
2268 /*
2269 * How many packets could we transmit in this interval?
2270 * If packets didn't fit in the previous interval, we will need
2271 * to transmit that many packets twice within this interval.
2272 */
2273 packets_remaining = 2 * packets_remaining +
2274 bw_table->interval_bw[i].num_packets;
2275
2276 /* Find the largest max packet size of this or the previous
2277 * interval.
2278 */
2279 if (list_empty(&bw_table->interval_bw[i].endpoints))
2280 largest_mps = 0;
2281 else {
2282 struct xhci_virt_ep *virt_ep;
2283 struct list_head *ep_entry;
2284
2285 ep_entry = bw_table->interval_bw[i].endpoints.next;
2286 virt_ep = list_entry(ep_entry,
2287 struct xhci_virt_ep, bw_endpoint_list);
2288 /* Convert to blocks, rounding up */
2289 largest_mps = DIV_ROUND_UP(
2290 virt_ep->bw_info.max_packet_size,
2291 block_size);
2292 }
2293 if (largest_mps > packet_size)
2294 packet_size = largest_mps;
2295
2296 /* Use the larger overhead of this or the previous interval. */
2297 interval_overhead = xhci_get_largest_overhead(
2298 &bw_table->interval_bw[i]);
2299 if (interval_overhead > overhead)
2300 overhead = interval_overhead;
2301
2302 /* How many packets can we evenly distribute across
2303 * (1 << (i + 1)) possible scheduling opportunities?
2304 */
2305 packets_transmitted = packets_remaining >> (i + 1);
2306
2307 /* Add in the bandwidth used for those scheduled packets */
2308 bw_added = packets_transmitted * (overhead + packet_size);
2309
2310 /* How many packets do we have remaining to transmit? */
2311 packets_remaining = packets_remaining % (1 << (i + 1));
2312
2313 /* What largest max packet size should those packets have? */
2314 /* If we've transmitted all packets, don't carry over the
2315 * largest packet size.
2316 */
2317 if (packets_remaining == 0) {
2318 packet_size = 0;
2319 overhead = 0;
2320 } else if (packets_transmitted > 0) {
2321 /* Otherwise if we do have remaining packets, and we've
2322 * scheduled some packets in this interval, take the
2323 * largest max packet size from endpoints with this
2324 * interval.
2325 */
2326 packet_size = largest_mps;
2327 overhead = interval_overhead;
2328 }
2329 /* Otherwise carry over packet_size and overhead from the last
2330 * time we had a remainder.
2331 */
2332 bw_used += bw_added;
2333 if (bw_used > max_bandwidth) {
2334 xhci_warn(xhci, "Not enough bandwidth. "
2335 "Proposed: %u, Max: %u\n",
2336 bw_used, max_bandwidth);
2337 return -ENOMEM;
2338 }
2339 }
2340 /*
2341 * Ok, we know we have some packets left over after even-handedly
2342 * scheduling interval 15. We don't know which microframes they will
2343 * fit into, so we over-schedule and say they will be scheduled every
2344 * microframe.
2345 */
2346 if (packets_remaining > 0)
2347 bw_used += overhead + packet_size;
2348
2349 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2350 unsigned int port_index = virt_dev->real_port - 1;
2351
2352 /* OK, we're manipulating a HS device attached to a
2353 * root port bandwidth domain. Include the number of active TTs
2354 * in the bandwidth used.
2355 */
2356 bw_used += TT_HS_OVERHEAD *
2357 xhci->rh_bw[port_index].num_active_tts;
2358 }
2359
2360 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2361 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2362 "Available: %u " "percent",
2363 bw_used, max_bandwidth, bw_reserved,
2364 (max_bandwidth - bw_used - bw_reserved) * 100 /
2365 max_bandwidth);
2366
2367 bw_used += bw_reserved;
2368 if (bw_used > max_bandwidth) {
2369 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2370 bw_used, max_bandwidth);
2371 return -ENOMEM;
2372 }
2373
2374 bw_table->bw_used = bw_used;
2375 return 0;
2376}
2377
2378static bool xhci_is_async_ep(unsigned int ep_type)
2379{
2380 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2381 ep_type != ISOC_IN_EP &&
2382 ep_type != INT_IN_EP);
2383}
2384
2385static bool xhci_is_sync_in_ep(unsigned int ep_type)
2386{
2387 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2388}
2389
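/*
 * Estimate the SuperSpeed bandwidth (in blocks) consumed by one endpoint.
 * For ep_interval == 0 the burst overhead is paid once per microframe on
 * top of the mult * num_packets packets; otherwise the whole cost, burst
 * overhead included, is averaged over the 2^ep_interval microframe service
 * interval.
 */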
2390static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2391{
2392 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2393
2394 if (ep_bw->ep_interval == 0)
2395 return SS_OVERHEAD_BURST +
2396 (ep_bw->mult * ep_bw->num_packets *
2397 (SS_OVERHEAD + mps));
2398 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2399 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2400 1 << ep_bw->ep_interval);
2401
2402}
2403
2404void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2405 struct xhci_bw_info *ep_bw,
2406 struct xhci_interval_bw_table *bw_table,
2407 struct usb_device *udev,
2408 struct xhci_virt_ep *virt_ep,
2409 struct xhci_tt_bw_info *tt_info)
2410{
2411 struct xhci_interval_bw *interval_bw;
2412 int normalized_interval;
2413
2414 if (xhci_is_async_ep(ep_bw->type))
2415 return;
2416
2417 if (udev->speed >= USB_SPEED_SUPER) {
2418 if (xhci_is_sync_in_ep(ep_bw->type))
2419 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2420 xhci_get_ss_bw_consumed(ep_bw);
2421 else
2422 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2423 xhci_get_ss_bw_consumed(ep_bw);
2424 return;
2425 }
2426
2427 /* SuperSpeed endpoints never get added to intervals in the table, so
2428 * this check is only valid for HS/FS/LS devices.
2429 */
2430 if (list_empty(&virt_ep->bw_endpoint_list))
2431 return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames (one frame is 2^3 microframes, hence the -3).
	 */
2435 if (udev->speed == USB_SPEED_HIGH)
2436 normalized_interval = ep_bw->ep_interval;
2437 else
2438 normalized_interval = ep_bw->ep_interval - 3;
2439
2440 if (normalized_interval == 0)
2441 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2442 interval_bw = &bw_table->interval_bw[normalized_interval];
2443 interval_bw->num_packets -= ep_bw->num_packets;
2444 switch (udev->speed) {
2445 case USB_SPEED_LOW:
2446 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2447 break;
2448 case USB_SPEED_FULL:
2449 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2450 break;
2451 case USB_SPEED_HIGH:
2452 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2453 break;
2454 case USB_SPEED_SUPER:
2455 case USB_SPEED_SUPER_PLUS:
2456 case USB_SPEED_UNKNOWN:
2457 case USB_SPEED_WIRELESS:
2458 /* Should never happen because only LS/FS/HS endpoints will get
2459 * added to the endpoint list.
2460 */
2461 return;
2462 }
2463 if (tt_info)
2464 tt_info->active_eps -= 1;
2465 list_del_init(&virt_ep->bw_endpoint_list);
2466}
2467
2468static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2469 struct xhci_bw_info *ep_bw,
2470 struct xhci_interval_bw_table *bw_table,
2471 struct usb_device *udev,
2472 struct xhci_virt_ep *virt_ep,
2473 struct xhci_tt_bw_info *tt_info)
2474{
2475 struct xhci_interval_bw *interval_bw;
2476 struct xhci_virt_ep *smaller_ep;
2477 int normalized_interval;
2478
2479 if (xhci_is_async_ep(ep_bw->type))
2480 return;
2481
	/* Keep this consistent with the drop path above (SS and SS+ alike) */
	if (udev->speed >= USB_SPEED_SUPER) {
2483 if (xhci_is_sync_in_ep(ep_bw->type))
2484 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2485 xhci_get_ss_bw_consumed(ep_bw);
2486 else
2487 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2488 xhci_get_ss_bw_consumed(ep_bw);
2489 return;
2490 }
2491
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames (one frame is 2^3 microframes, hence the -3).
	 */
2495 if (udev->speed == USB_SPEED_HIGH)
2496 normalized_interval = ep_bw->ep_interval;
2497 else
2498 normalized_interval = ep_bw->ep_interval - 3;
2499
2500 if (normalized_interval == 0)
2501 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2502 interval_bw = &bw_table->interval_bw[normalized_interval];
2503 interval_bw->num_packets += ep_bw->num_packets;
2504 switch (udev->speed) {
2505 case USB_SPEED_LOW:
2506 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2507 break;
2508 case USB_SPEED_FULL:
2509 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2510 break;
2511 case USB_SPEED_HIGH:
2512 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2513 break;
2514 case USB_SPEED_SUPER:
2515 case USB_SPEED_SUPER_PLUS:
2516 case USB_SPEED_UNKNOWN:
2517 case USB_SPEED_WIRELESS:
2518 /* Should never happen because only LS/FS/HS endpoints will get
2519 * added to the endpoint list.
2520 */
2521 return;
2522 }
2523
2524 if (tt_info)
2525 tt_info->active_eps += 1;
2526 /* Insert the endpoint into the list, largest max packet size first. */
2527 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2528 bw_endpoint_list) {
2529 if (ep_bw->max_packet_size >=
2530 smaller_ep->bw_info.max_packet_size) {
2531 /* Add the new ep before the smaller endpoint */
2532 list_add_tail(&virt_ep->bw_endpoint_list,
2533 &smaller_ep->bw_endpoint_list);
2534 return;
2535 }
2536 }
2537 /* Add the new endpoint at the end of the list. */
2538 list_add_tail(&virt_ep->bw_endpoint_list,
2539 &interval_bw->endpoints);
2540}
2541
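/* Charge or refund the high-speed bus overhead of a Transaction Translator
 * when it gains its first active periodic endpoint or loses its last one.
 */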
2542void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2543 struct xhci_virt_device *virt_dev,
2544 int old_active_eps)
2545{
2546 struct xhci_root_port_bw_info *rh_bw_info;
2547 if (!virt_dev->tt_info)
2548 return;
2549
2550 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2551 if (old_active_eps == 0 &&
2552 virt_dev->tt_info->active_eps != 0) {
2553 rh_bw_info->num_active_tts += 1;
2554 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2555 } else if (old_active_eps != 0 &&
2556 virt_dev->tt_info->active_eps == 0) {
2557 rh_bw_info->num_active_tts -= 1;
2558 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2559 }
2560}
2561
2562static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2563 struct xhci_virt_device *virt_dev,
2564 struct xhci_container_ctx *in_ctx)
2565{
2566 struct xhci_bw_info ep_bw_info[31];
2567 int i;
2568 struct xhci_input_control_ctx *ctrl_ctx;
2569 int old_active_eps = 0;
2570
2571 if (virt_dev->tt_info)
2572 old_active_eps = virt_dev->tt_info->active_eps;
2573
2574 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2575 if (!ctrl_ctx) {
2576 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2577 __func__);
2578 return -ENOMEM;
2579 }
2580
2581 for (i = 0; i < 31; i++) {
2582 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2583 continue;
2584
2585 /* Make a copy of the BW info in case we need to revert this */
2586 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2587 sizeof(ep_bw_info[i]));
2588 /* Drop the endpoint from the interval table if the endpoint is
2589 * being dropped or changed.
2590 */
2591 if (EP_IS_DROPPED(ctrl_ctx, i))
2592 xhci_drop_ep_from_interval_table(xhci,
2593 &virt_dev->eps[i].bw_info,
2594 virt_dev->bw_table,
2595 virt_dev->udev,
2596 &virt_dev->eps[i],
2597 virt_dev->tt_info);
2598 }
2599 /* Overwrite the information stored in the endpoints' bw_info */
2600 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2601 for (i = 0; i < 31; i++) {
2602 /* Add any changed or added endpoints to the interval table */
2603 if (EP_IS_ADDED(ctrl_ctx, i))
2604 xhci_add_ep_to_interval_table(xhci,
2605 &virt_dev->eps[i].bw_info,
2606 virt_dev->bw_table,
2607 virt_dev->udev,
2608 &virt_dev->eps[i],
2609 virt_dev->tt_info);
2610 }
2611
2612 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2613 /* Ok, this fits in the bandwidth we have.
2614 * Update the number of active TTs.
2615 */
2616 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2617 return 0;
2618 }
2619
2620 /* We don't have enough bandwidth for this, revert the stored info. */
2621 for (i = 0; i < 31; i++) {
2622 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2623 continue;
2624
2625 /* Drop the new copies of any added or changed endpoints from
2626 * the interval table.
2627 */
2628 if (EP_IS_ADDED(ctrl_ctx, i)) {
2629 xhci_drop_ep_from_interval_table(xhci,
2630 &virt_dev->eps[i].bw_info,
2631 virt_dev->bw_table,
2632 virt_dev->udev,
2633 &virt_dev->eps[i],
2634 virt_dev->tt_info);
2635 }
2636 /* Revert the endpoint back to its old information */
2637 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2638 sizeof(ep_bw_info[i]));
2639 /* Add any changed or dropped endpoints back into the table */
2640 if (EP_IS_DROPPED(ctrl_ctx, i))
2641 xhci_add_ep_to_interval_table(xhci,
2642 &virt_dev->eps[i].bw_info,
2643 virt_dev->bw_table,
2644 virt_dev->udev,
2645 &virt_dev->eps[i],
2646 virt_dev->tt_info);
2647 }
2648 return -ENOMEM;
2649}
2650
2651
2652/* Issue a configure endpoint command or evaluate context command
2653 * and wait for it to finish.
2654 */
2655static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2656 struct usb_device *udev,
2657 struct xhci_command *command,
2658 bool ctx_change, bool must_succeed)
2659{
2660 int ret;
2661 unsigned long flags;
2662 struct xhci_input_control_ctx *ctrl_ctx;
2663 struct xhci_virt_device *virt_dev;
2664
2665 if (!command)
2666 return -EINVAL;
2667
2668 spin_lock_irqsave(&xhci->lock, flags);
2669 virt_dev = xhci->devs[udev->slot_id];
2670
2671 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2672 if (!ctrl_ctx) {
2673 spin_unlock_irqrestore(&xhci->lock, flags);
2674 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2675 __func__);
2676 return -ENOMEM;
2677 }
2678
2679 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2680 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2681 spin_unlock_irqrestore(&xhci->lock, flags);
2682 xhci_warn(xhci, "Not enough host resources, "
2683 "active endpoint contexts = %u\n",
2684 xhci->num_active_eps);
2685 return -ENOMEM;
2686 }
2687 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2688 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2689 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2690 xhci_free_host_resources(xhci, ctrl_ctx);
2691 spin_unlock_irqrestore(&xhci->lock, flags);
2692 xhci_warn(xhci, "Not enough bandwidth\n");
2693 return -ENOMEM;
2694 }
2695
2696 if (!ctx_change)
2697 ret = xhci_queue_configure_endpoint(xhci, command,
2698 command->in_ctx->dma,
2699 udev->slot_id, must_succeed);
2700 else
2701 ret = xhci_queue_evaluate_context(xhci, command,
2702 command->in_ctx->dma,
2703 udev->slot_id, must_succeed);
2704 if (ret < 0) {
2705 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2706 xhci_free_host_resources(xhci, ctrl_ctx);
2707 spin_unlock_irqrestore(&xhci->lock, flags);
2708 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2709 "FIXME allocate a new ring segment");
2710 return -ENOMEM;
2711 }
2712 xhci_ring_cmd_db(xhci);
2713 spin_unlock_irqrestore(&xhci->lock, flags);
2714
2715 /* Wait for the configure endpoint command to complete */
2716 wait_for_completion(command->completion);
2717
2718 if (!ctx_change)
2719 ret = xhci_configure_endpoint_result(xhci, udev,
2720 &command->status);
2721 else
2722 ret = xhci_evaluate_context_result(xhci, udev,
2723 &command->status);
2724
2725 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2726 spin_lock_irqsave(&xhci->lock, flags);
2727 /* If the command failed, remove the reserved resources.
2728 * Otherwise, clean up the estimate to include dropped eps.
2729 */
2730 if (ret)
2731 xhci_free_host_resources(xhci, ctrl_ctx);
2732 else
2733 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2734 spin_unlock_irqrestore(&xhci->lock, flags);
2735 }
2736 return ret;
2737}
2738
2739static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2740 struct xhci_virt_device *vdev, int i)
2741{
2742 struct xhci_virt_ep *ep = &vdev->eps[i];
2743
2744 if (ep->ep_state & EP_HAS_STREAMS) {
2745 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2746 xhci_get_endpoint_address(i));
2747 xhci_free_stream_info(xhci, ep->stream_info);
2748 ep->stream_info = NULL;
2749 ep->ep_state &= ~EP_HAS_STREAMS;
2750 }
2751}
2752
2753/* Called after one or more calls to xhci_add_endpoint() or
2754 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2755 * to call xhci_reset_bandwidth().
2756 *
2757 * Since we are in the middle of changing either configuration or
2758 * installing a new alt setting, the USB core won't allow URBs to be
2759 * enqueued for any endpoint on the old config or interface. Nothing
2760 * else should be touching the xhci->devs[slot_id] structure, so we
2761 * don't need to take the xhci->lock for manipulating that.
2762 */
2763int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2764{
2765 int i;
2766 int ret = 0;
2767 struct xhci_hcd *xhci;
2768 struct xhci_virt_device *virt_dev;
2769 struct xhci_input_control_ctx *ctrl_ctx;
2770 struct xhci_slot_ctx *slot_ctx;
2771 struct xhci_command *command;
2772
2773 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2774 if (ret <= 0)
2775 return ret;
2776 xhci = hcd_to_xhci(hcd);
2777 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2778 (xhci->xhc_state & XHCI_STATE_REMOVING))
2779 return -ENODEV;
2780
2781 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2782 virt_dev = xhci->devs[udev->slot_id];
2783
2784 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
2785 if (!command)
2786 return -ENOMEM;
2787
2788 command->in_ctx = virt_dev->in_ctx;
2789
2790 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2791 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2792 if (!ctrl_ctx) {
2793 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2794 __func__);
2795 ret = -ENOMEM;
2796 goto command_cleanup;
2797 }
2798 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2799 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2800 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2801
	/* Don't issue the command if there are no endpoints to update. */
2803 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2804 ctrl_ctx->drop_flags == 0) {
2805 ret = 0;
2806 goto command_cleanup;
2807 }
2808 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2809 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2810 for (i = 31; i >= 1; i--) {
2811 __le32 le32 = cpu_to_le32(BIT(i));
2812
2813 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2814 || (ctrl_ctx->add_flags & le32) || i == 1) {
2815 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2816 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2817 break;
2818 }
2819 }
2820 xhci_dbg(xhci, "New Input Control Context:\n");
2821 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2822 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2823
2824 ret = xhci_configure_endpoint(xhci, udev, command,
2825 false, false);
2826 if (ret)
2827 /* Callee should call reset_bandwidth() */
2828 goto command_cleanup;
2829
2830 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2831 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2832 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2833
2834 /* Free any rings that were dropped, but not changed. */
2835 for (i = 1; i < 31; ++i) {
2836 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2837 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2838 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2839 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2840 }
2841 }
2842 xhci_zero_in_ctx(xhci, virt_dev);
2843 /*
2844 * Install any rings for completely new endpoints or changed endpoints,
2845 * and free or cache any old rings from changed endpoints.
2846 */
2847 for (i = 1; i < 31; ++i) {
2848 if (!virt_dev->eps[i].new_ring)
2849 continue;
2850 /* Only cache or free the old ring if it exists.
2851 * It may not if this is the first add of an endpoint.
2852 */
2853 if (virt_dev->eps[i].ring) {
2854 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2855 }
2856 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2857 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2858 virt_dev->eps[i].new_ring = NULL;
2859 }
2860command_cleanup:
2861 kfree(command->completion);
2862 kfree(command);
2863
2864 return ret;
2865}
2866
2867void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2868{
2869 struct xhci_hcd *xhci;
2870 struct xhci_virt_device *virt_dev;
2871 int i, ret;
2872
2873 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2874 if (ret <= 0)
2875 return;
2876 xhci = hcd_to_xhci(hcd);
2877
2878 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2879 virt_dev = xhci->devs[udev->slot_id];
2880 /* Free any rings allocated for added endpoints */
2881 for (i = 0; i < 31; ++i) {
2882 if (virt_dev->eps[i].new_ring) {
2883 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2884 virt_dev->eps[i].new_ring = NULL;
2885 }
2886 }
2887 xhci_zero_in_ctx(xhci, virt_dev);
2888}
2889
2890static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2891 struct xhci_container_ctx *in_ctx,
2892 struct xhci_container_ctx *out_ctx,
2893 struct xhci_input_control_ctx *ctrl_ctx,
2894 u32 add_flags, u32 drop_flags)
2895{
2896 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2897 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2898 xhci_slot_copy(xhci, in_ctx, out_ctx);
2899 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2900
2901 xhci_dbg(xhci, "Input Context:\n");
2902 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2903}
2904
2905static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2906 unsigned int slot_id, unsigned int ep_index,
2907 struct xhci_dequeue_state *deq_state)
2908{
2909 struct xhci_input_control_ctx *ctrl_ctx;
2910 struct xhci_container_ctx *in_ctx;
2911 struct xhci_ep_ctx *ep_ctx;
2912 u32 added_ctxs;
2913 dma_addr_t addr;
2914
2915 in_ctx = xhci->devs[slot_id]->in_ctx;
2916 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2917 if (!ctrl_ctx) {
2918 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2919 __func__);
2920 return;
2921 }
2922
2923 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2924 xhci->devs[slot_id]->out_ctx, ep_index);
2925 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2926 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2927 deq_state->new_deq_ptr);
2928 if (addr == 0) {
2929 xhci_warn(xhci, "WARN Cannot submit config ep after "
2930 "reset ep command\n");
2931 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2932 deq_state->new_deq_seg,
2933 deq_state->new_deq_ptr);
2934 return;
2935 }
2936 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2937
2938 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2939 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2940 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2941 added_ctxs, added_ctxs);
2942}
2943
2944void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2945 unsigned int ep_index, struct xhci_td *td)
2946{
2947 struct xhci_dequeue_state deq_state;
2948 struct xhci_virt_ep *ep;
2949 struct usb_device *udev = td->urb->dev;
2950
2951 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2952 "Cleaning up stalled endpoint ring");
2953 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2954 /* We need to move the HW's dequeue pointer past this TD,
2955 * or it will attempt to resend it on the next doorbell ring.
2956 */
2957 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2958 ep_index, ep->stopped_stream, td, &deq_state);
2959
2960 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2961 return;
2962
2963 /* HW with the reset endpoint quirk will use the saved dequeue state to
2964 * issue a configure endpoint command later.
2965 */
2966 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2967 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2968 "Queueing new dequeue state");
2969 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2970 ep_index, ep->stopped_stream, &deq_state);
2971 } else {
2972 /* Better hope no one uses the input context between now and the
2973 * reset endpoint completion!
2974 * XXX: No idea how this hardware will react when stream rings
2975 * are enabled.
2976 */
2977 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2978 "Setting up input context for "
2979 "configure endpoint command");
2980 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2981 ep_index, &deq_state);
2982 }
2983}
2984
2985/* Called when clearing halted device. The core should have sent the control
2986 * message to clear the device halt condition. The host side of the halt should
2987 * already be cleared with a reset endpoint command issued when the STALL tx
2988 * event was received.
2989 *
2990 * Context: in_interrupt
2991 */
2992
2993void xhci_endpoint_reset(struct usb_hcd *hcd,
2994 struct usb_host_endpoint *ep)
2995{
2996 struct xhci_hcd *xhci;
2997
2998 xhci = hcd_to_xhci(hcd);
2999
3000 /*
3001 * We might need to implement the config ep cmd in xhci 4.8.1 note:
3002 * The Reset Endpoint Command may only be issued to endpoints in the
	 * Halted state. If software wishes to reset the Data Toggle or Sequence
	 * Number of an endpoint that isn't in the Halted state, then software
	 * may issue a Configure Endpoint Command with the Drop and Add bits set
	 * for the target endpoint that is in the Stopped state.
3007 */
3008
3009 /* For now just print debug to follow the situation */
3010 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
3011 ep->desc.bEndpointAddress);
3012}
3013
3014static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3015 struct usb_device *udev, struct usb_host_endpoint *ep,
3016 unsigned int slot_id)
3017{
3018 int ret;
3019 unsigned int ep_index;
3020 unsigned int ep_state;
3021
3022 if (!ep)
3023 return -EINVAL;
3024 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3025 if (ret <= 0)
3026 return -EINVAL;
3027 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3028 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3029 " descriptor for ep 0x%x does not support streams\n",
3030 ep->desc.bEndpointAddress);
3031 return -EINVAL;
3032 }
3033
3034 ep_index = xhci_get_endpoint_index(&ep->desc);
3035 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3036 if (ep_state & EP_HAS_STREAMS ||
3037 ep_state & EP_GETTING_STREAMS) {
3038 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3039 "already has streams set up.\n",
3040 ep->desc.bEndpointAddress);
3041 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3042 "dynamic stream context array reallocation.\n");
3043 return -EINVAL;
3044 }
3045 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3046 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3047 "endpoint 0x%x; URBs are pending.\n",
3048 ep->desc.bEndpointAddress);
3049 return -EINVAL;
3050 }
3051 return 0;
3052}
3053
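/* Round the requested stream count up to a power-of-two context array size
 * and clamp both to what the controller advertises; e.g. a (hypothetical)
 * request for 5 streams needs roundup_pow_of_two(5) = 8 context entries,
 * unless HCC_MAX_PSA reports a smaller limit.
 */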
3054static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3055 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3056{
3057 unsigned int max_streams;
3058
3059 /* The stream context array size must be a power of two */
3060 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3061 /*
3062 * Find out how many primary stream array entries the host controller
3063 * supports. Later we may use secondary stream arrays (similar to 2nd
3064 * level page entries), but that's an optional feature for xHCI host
3065 * controllers. xHCs must support at least 4 stream IDs.
3066 */
3067 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3068 if (*num_stream_ctxs > max_streams) {
3069 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3070 max_streams);
3071 *num_stream_ctxs = max_streams;
3072 *num_streams = max_streams;
3073 }
3074}
3075
/* Returns an error code if one of the endpoints already has streams.
3077 * This does not change any data structures, it only checks and gathers
3078 * information.
3079 */
3080static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3081 struct usb_device *udev,
3082 struct usb_host_endpoint **eps, unsigned int num_eps,
3083 unsigned int *num_streams, u32 *changed_ep_bitmask)
3084{
3085 unsigned int max_streams;
3086 unsigned int endpoint_flag;
3087 int i;
3088 int ret;
3089
3090 for (i = 0; i < num_eps; i++) {
3091 ret = xhci_check_streams_endpoint(xhci, udev,
3092 eps[i], udev->slot_id);
3093 if (ret < 0)
3094 return ret;
3095
3096 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3097 if (max_streams < (*num_streams - 1)) {
3098 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3099 eps[i]->desc.bEndpointAddress,
3100 max_streams);
3101 *num_streams = max_streams+1;
3102 }
3103
3104 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3105 if (*changed_ep_bitmask & endpoint_flag)
3106 return -EINVAL;
3107 *changed_ep_bitmask |= endpoint_flag;
3108 }
3109 return 0;
3110}
3111
3112static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3113 struct usb_device *udev,
3114 struct usb_host_endpoint **eps, unsigned int num_eps)
3115{
3116 u32 changed_ep_bitmask = 0;
3117 unsigned int slot_id;
3118 unsigned int ep_index;
3119 unsigned int ep_state;
3120 int i;
3121
3122 slot_id = udev->slot_id;
3123 if (!xhci->devs[slot_id])
3124 return 0;
3125
3126 for (i = 0; i < num_eps; i++) {
3127 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3128 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3129 /* Are streams already being freed for the endpoint? */
3130 if (ep_state & EP_GETTING_NO_STREAMS) {
3131 xhci_warn(xhci, "WARN Can't disable streams for "
3132 "endpoint 0x%x, "
3133 "streams are being disabled already\n",
3134 eps[i]->desc.bEndpointAddress);
3135 return 0;
3136 }
3137 /* Are there actually any streams to free? */
3138 if (!(ep_state & EP_HAS_STREAMS) &&
3139 !(ep_state & EP_GETTING_STREAMS)) {
3140 xhci_warn(xhci, "WARN Can't disable streams for "
3141 "endpoint 0x%x, "
3142 "streams are already disabled!\n",
3143 eps[i]->desc.bEndpointAddress);
3144 xhci_warn(xhci, "WARN xhci_free_streams() called "
3145 "with non-streams endpoint\n");
3146 return 0;
3147 }
3148 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3149 }
3150 return changed_ep_bitmask;
3151}
3152
3153/*
3154 * The USB device drivers use this function (through the HCD interface in USB
3155 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3156 * coordinate mass storage command queueing across multiple endpoints (basically
3157 * a stream ID == a task ID).
3158 *
3159 * Setting up streams involves allocating the same size stream context array
3160 * for each endpoint and issuing a configure endpoint command for all endpoints.
3161 *
3162 * Don't allow the call to succeed if one endpoint only supports one stream
3163 * (which means it doesn't support streams at all).
3164 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
3166 * hardware or endpoints claim they can't support the number of requested
3167 * stream IDs.
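 *
 * Sketch of the driver-side call path (hypothetical sizes): a class driver
 * normally goes through the USB core rather than calling this directly,
 * e.g. num = usb_alloc_streams(intf, eps, num_eps, 256, GFP_NOIO), which
 * lands here via the hc_driver alloc_streams hook; a positive return value
 * is the number of stream IDs usable by the driver (stream 0, reserved for
 * the xHC, already excluded).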
3168 */
3169int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3170 struct usb_host_endpoint **eps, unsigned int num_eps,
3171 unsigned int num_streams, gfp_t mem_flags)
3172{
3173 int i, ret;
3174 struct xhci_hcd *xhci;
3175 struct xhci_virt_device *vdev;
3176 struct xhci_command *config_cmd;
3177 struct xhci_input_control_ctx *ctrl_ctx;
3178 unsigned int ep_index;
3179 unsigned int num_stream_ctxs;
3180 unsigned long flags;
3181 u32 changed_ep_bitmask = 0;
3182
3183 if (!eps)
3184 return -EINVAL;
3185
3186 /* Add one to the number of streams requested to account for
3187 * stream 0 that is reserved for xHCI usage.
3188 */
3189 num_streams += 1;
3190 xhci = hcd_to_xhci(hcd);
3191 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3192 num_streams);
3193
3194 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3195 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3196 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3197 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3198 return -ENOSYS;
3199 }
3200
3201 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3202 if (!config_cmd) {
3203 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3204 return -ENOMEM;
3205 }
3206 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3207 if (!ctrl_ctx) {
3208 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3209 __func__);
3210 xhci_free_command(xhci, config_cmd);
3211 return -ENOMEM;
3212 }
3213
3214 /* Check to make sure all endpoints are not already configured for
3215 * streams. While we're at it, find the maximum number of streams that
3216 * all the endpoints will support and check for duplicate endpoints.
3217 */
3218 spin_lock_irqsave(&xhci->lock, flags);
3219 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3220 num_eps, &num_streams, &changed_ep_bitmask);
3221 if (ret < 0) {
3222 xhci_free_command(xhci, config_cmd);
3223 spin_unlock_irqrestore(&xhci->lock, flags);
3224 return ret;
3225 }
3226 if (num_streams <= 1) {
3227 xhci_warn(xhci, "WARN: endpoints can't handle "
3228 "more than one stream.\n");
3229 xhci_free_command(xhci, config_cmd);
3230 spin_unlock_irqrestore(&xhci->lock, flags);
3231 return -EINVAL;
3232 }
3233 vdev = xhci->devs[udev->slot_id];
3234 /* Mark each endpoint as being in transition, so
3235 * xhci_urb_enqueue() will reject all URBs.
3236 */
3237 for (i = 0; i < num_eps; i++) {
3238 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3239 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3240 }
3241 spin_unlock_irqrestore(&xhci->lock, flags);
3242
3243 /* Setup internal data structures and allocate HW data structures for
3244 * streams (but don't install the HW structures in the input context
3245 * until we're sure all memory allocation succeeded).
3246 */
3247 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3248 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3249 num_stream_ctxs, num_streams);
3250
3251 for (i = 0; i < num_eps; i++) {
3252 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3253 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3254 num_stream_ctxs,
3255 num_streams, mem_flags);
3256 if (!vdev->eps[ep_index].stream_info)
3257 goto cleanup;
3258 /* Set maxPstreams in endpoint context and update deq ptr to
3259 * point to stream context array. FIXME
3260 */
3261 }
3262
3263 /* Set up the input context for a configure endpoint command. */
3264 for (i = 0; i < num_eps; i++) {
3265 struct xhci_ep_ctx *ep_ctx;
3266
3267 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3268 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3269
3270 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3271 vdev->out_ctx, ep_index);
3272 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3273 vdev->eps[ep_index].stream_info);
3274 }
3275 /* Tell the HW to drop its old copy of the endpoint context info
3276 * and add the updated copy from the input context.
3277 */
3278 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3279 vdev->out_ctx, ctrl_ctx,
3280 changed_ep_bitmask, changed_ep_bitmask);
3281
3282 /* Issue and wait for the configure endpoint command */
3283 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3284 false, false);
3285
3286 /* xHC rejected the configure endpoint command for some reason, so we
3287 * leave the old ring intact and free our internal streams data
3288 * structure.
3289 */
3290 if (ret < 0)
3291 goto cleanup;
3292
3293 spin_lock_irqsave(&xhci->lock, flags);
3294 for (i = 0; i < num_eps; i++) {
3295 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3296 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3297 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3298 udev->slot_id, ep_index);
3299 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3300 }
3301 xhci_free_command(xhci, config_cmd);
3302 spin_unlock_irqrestore(&xhci->lock, flags);
3303
3304 /* Subtract 1 for stream 0, which drivers can't use */
3305 return num_streams - 1;
3306
3307cleanup:
3308 /* If it didn't work, free the streams! */
3309 for (i = 0; i < num_eps; i++) {
3310 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3311 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3312 vdev->eps[ep_index].stream_info = NULL;
3313 /* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
3315 */
3316 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3317 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3318 xhci_endpoint_zero(xhci, vdev, eps[i]);
3319 }
3320 xhci_free_command(xhci, config_cmd);
3321 return -ENOMEM;
3322}
3323
3324/* Transition the endpoint from using streams to being a "normal" endpoint
3325 * without streams.
3326 *
3327 * Modify the endpoint context state, submit a configure endpoint command,
3328 * and free all endpoint rings for streams if that completes successfully.
3329 */
3330int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3331 struct usb_host_endpoint **eps, unsigned int num_eps,
3332 gfp_t mem_flags)
3333{
3334 int i, ret;
3335 struct xhci_hcd *xhci;
3336 struct xhci_virt_device *vdev;
3337 struct xhci_command *command;
3338 struct xhci_input_control_ctx *ctrl_ctx;
3339 unsigned int ep_index;
3340 unsigned long flags;
3341 u32 changed_ep_bitmask;
3342
3343 xhci = hcd_to_xhci(hcd);
3344 vdev = xhci->devs[udev->slot_id];
3345
3346 /* Set up a configure endpoint command to remove the streams rings */
3347 spin_lock_irqsave(&xhci->lock, flags);
3348 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3349 udev, eps, num_eps);
3350 if (changed_ep_bitmask == 0) {
3351 spin_unlock_irqrestore(&xhci->lock, flags);
3352 return -EINVAL;
3353 }
3354
3355 /* Use the xhci_command structure from the first endpoint. We may have
3356 * allocated too many, but the driver may call xhci_free_streams() for
3357 * each endpoint it grouped into one call to xhci_alloc_streams().
3358 */
3359 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3360 command = vdev->eps[ep_index].stream_info->free_streams_command;
3361 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3362 if (!ctrl_ctx) {
3363 spin_unlock_irqrestore(&xhci->lock, flags);
3364 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3365 __func__);
3366 return -EINVAL;
3367 }
3368
3369 for (i = 0; i < num_eps; i++) {
3370 struct xhci_ep_ctx *ep_ctx;
3371
3372 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3373 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3374 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3375 EP_GETTING_NO_STREAMS;
3376
3377 xhci_endpoint_copy(xhci, command->in_ctx,
3378 vdev->out_ctx, ep_index);
3379 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3380 &vdev->eps[ep_index]);
3381 }
3382 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3383 vdev->out_ctx, ctrl_ctx,
3384 changed_ep_bitmask, changed_ep_bitmask);
3385 spin_unlock_irqrestore(&xhci->lock, flags);
3386
3387 /* Issue and wait for the configure endpoint command,
3388 * which must succeed.
3389 */
3390 ret = xhci_configure_endpoint(xhci, udev, command,
3391 false, true);
3392
3393 /* xHC rejected the configure endpoint command for some reason, so we
3394 * leave the streams rings intact.
3395 */
3396 if (ret < 0)
3397 return ret;
3398
3399 spin_lock_irqsave(&xhci->lock, flags);
3400 for (i = 0; i < num_eps; i++) {
3401 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3402 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3403 vdev->eps[ep_index].stream_info = NULL;
3404 /* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
3406 */
3407 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3408 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3409 }
3410 spin_unlock_irqrestore(&xhci->lock, flags);
3411
3412 return 0;
3413}
3414
3415/*
3416 * Deletes endpoint resources for endpoints that were active before a Reset
3417 * Device command, or a Disable Slot command. The Reset Device command leaves
3418 * the control endpoint intact, whereas the Disable Slot command deletes it.
3419 *
3420 * Must be called with xhci->lock held.
3421 */
3422void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3423 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3424{
3425 int i;
3426 unsigned int num_dropped_eps = 0;
3427 unsigned int drop_flags = 0;
3428
3429 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3430 if (virt_dev->eps[i].ring) {
3431 drop_flags |= 1 << i;
3432 num_dropped_eps++;
3433 }
3434 }
3435 xhci->num_active_eps -= num_dropped_eps;
3436 if (num_dropped_eps)
3437 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3438 "Dropped %u ep ctxs, flags = 0x%x, "
3439 "%u now active.",
3440 num_dropped_eps, drop_flags,
3441 xhci->num_active_eps);
3442}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint. The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration. If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish. Remove all structures
 * associated with the endpoints that were disabled. Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway. Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	wait_for_completion(reset_device_cmd->completion);

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout waiting for reset device command\n");
		ret = -ETIME;
		goto command_cleanup;
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error. May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
					xhci_get_endpoint_address(i));
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled. Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
	if (!command)
		return;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow controller to runtime suspend
	 * if no devices remain.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_put_noidle(hcd->self.controller);
#endif

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV) {
		kfree(command);
		return;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = readl(&xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return;
	}

	if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		/* The command was never queued, so the completion handler
		 * will never free it; free it here to avoid a leak.
		 */
		kfree(command);
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot. XXX Can free sleep?
	 */
}

/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding 1 ep ctx, %u now active.",
			xhci->num_active_eps);
	return 0;
}


/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed. Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret, slot_id;
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
	if (!command)
		return 0;

	/* xhci->slot_id and xhci->addr_dev are not thread-safe */
	mutex_lock(&xhci->mutex);
	spin_lock_irqsave(&xhci->lock, flags);
	command->completion = &xhci->addr_dev;
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		mutex_unlock(&xhci->mutex);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		kfree(command);
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(command->completion);
	slot_id = xhci->slot_id;
	mutex_unlock(&xhci->mutex);

	if (!slot_id || command->status != COMP_SUCCESS) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
				HCS_MAX_SLOTS(
					readl(&xhci->cap_regs->hcs_params1)));
		kfree(command);
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = slot_id;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_get_noresume(hcd->self.controller);
#endif


	kfree(command);
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	command->completion = NULL;
	command->status = 0;
	if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

/*
 * Issue an Address Device command and optionally send a corresponding
 * SetAddress request to the device.
 */
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
			     enum xhci_setup_dev setup)
{
	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
	unsigned long flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	struct xhci_command *command = NULL;

	mutex_lock(&xhci->mutex);

	if (xhci->xhc_state)	/* dying, removing or halted */
		goto out;

	if (!udev->slot_id) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Bad Slot ID %d", udev->slot_id);
		ret = -EINVAL;
		goto out;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		ret = -EINVAL;
		goto out;
	}

	if (setup == SETUP_CONTEXT_ONLY) {
		slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
				SLOT_STATE_DEFAULT) {
			xhci_dbg(xhci, "Slot already in default state\n");
			goto out;
		}
	}

	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
	if (!command) {
		ret = -ENOMEM;
		goto out;
	}

	command->in_ctx = virt_dev->in_ctx;
	command->completion = &xhci->addr_dev;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		ret = -EINVAL;
		goto out;
	}
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
			le32_to_cpu(slot_ctx->dev_info) >> 27);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
			udev->slot_id, setup);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"FIXME: allocate a command ring segment");
		goto out;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	wait_for_completion(command->completion);

	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() 'recovery interval' required by USB and aborting
	 * the command on a timeout."
	 */
	switch (command->status) {
	case COMP_CMD_ABORT:
	case COMP_CMD_STOP:
		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
		ret = -ETIME;
		break;
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
				act, udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
				"ERROR: Incompatible device for setup %s command\n", act);
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Successful setup %s command", act);
		break;
	default:
		xhci_err(xhci,
				"ERROR: unexpected setup %s command completion code 0x%x.\n",
				act, command->status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	}
	if (ret)
		goto out;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Op regs DCBAA ptr = %#016llx", temp_64);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Slot ID %d dcbaa entry @%p = %#016llx",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Output Context DMA address = %#08llx",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
			le32_to_cpu(slot_ctx->dev_info) >> 27);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
			le32_to_cpu(slot_ctx->dev_info) >> 27);
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Internal device address = %d",
			le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
out:
	mutex_unlock(&xhci->mutex);
	kfree(command);
	return ret;
}

int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}

int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
}

/*
 * Translate the port index into the real index in the HW port status
 * registers. Calculate the offset between the port's PORTSC register
 * and the port status base, then divide by the number of per-port
 * registers to get the real index. Raw port numbers are 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
	__le32 __iomem *addr;
	int raw_port;

	if (hcd->speed < HCD_USB3)
		addr = xhci->usb2_ports[port1 - 1];
	else
		addr = xhci->usb3_ports[port1 - 1];

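	/* e.g. (illustrative values): with NUM_PORT_REGS == 4 (PORTSC,
	 * PORTPMSC, PORTLI, PORTHLPMC), a PORTSC sitting 8 registers past
	 * port_status_base is raw port 8 / 4 + 1 = 3.
	 */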
	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
	return raw_port;
}

/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);

	virt_dev = xhci->devs[udev->slot_id];

	/*
	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4)
	 * and was re-initialized. Exit latency will be set later after
	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated.
	 */

	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	command = xhci->lpm_command;
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
	slot_ctx->dev_state = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Set up evaluate context for LPM MEL change.");
	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, command->in_ctx, 0);

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);
	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

#ifdef CONFIG_PM

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}

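	/* e.g. (illustrative values): u2del = 400 stops the loop above at
	 * besl_host = 4, the first xhci_besl_encoding[] entry >= 400; without
	 * BESL support, u2del = 200 gives besl_host = (200 - 51) / 75 + 1 = 2.
	 */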
	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}

/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
	u32 field;
	int l1;
	int besld = 0;
	int hirdm = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
	l1 = udev->l1_params.timeout / 256;

	/* device has preferred BESLD */
	if (field & USB_BESL_DEEP_VALID) {
		besld = USB_GET_BESL_DEEP(field);
		hirdm = 1;
	}

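	/* e.g. (illustrative values): an L1 timeout of 5120 us gives
	 * l1 = 5120 / 256 = 20; a device advertising a deep BESL of 10 yields
	 * PORT_BESLD(10) | PORT_L1_TIMEOUT(20) | PORT_HIRDM(1).
	 */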
	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem **port_array;
	__le32 __iomem *pm_addr, *hlpm_addr;
	u32 pm_val, hlpm_val, field;
	unsigned int port_num;
	unsigned long flags;
	int hird, exit_latency;
	int ret;

	if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + PORTPMSC;
	pm_val = readl(pm_addr);
	hlpm_addr = port_array[port_num] + PORTHLPMC;
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num + 1);

	if (enable) {
		/* Host supports BESL timeout instead of HIRD */
		if (udev->usb2_hw_lpm_besl_capable) {
			/* if device doesn't have a preferred BESL value use a
			 * default one which works with mixed HIRD and BESL
			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
			 */
			if ((field & USB_BESL_SUPPORT) &&
			    (field & USB_BESL_BASELINE_VALID))
				hird = USB_GET_BESL_BASELINE(field);
			else
				hird = udev->l1_params.besl;

			exit_latency = xhci_besl_encoding[hird];
			spin_unlock_irqrestore(&xhci->lock, flags);

			/* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
			 * input context for link power management evaluate
			 * context commands. It is protected by hcd->bandwidth
			 * mutex and is shared by all devices. We need to set
			 * the max ext latency in USB 2 BESL LPM as well, so
			 * use the same mutex and xhci_change_max_exit_latency().
			 */
			mutex_lock(hcd->bandwidth_mutex);
			ret = xhci_change_max_exit_latency(xhci, udev,
							   exit_latency);
			mutex_unlock(hcd->bandwidth_mutex);

			if (ret < 0)
				return ret;
			spin_lock_irqsave(&xhci->lock, flags);

			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
		} else {
			hird = xhci_calculate_hird_besl(xhci, udev);
		}

		pm_val &= ~PORT_HIRD_MASK;
		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
	} else {
		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			mutex_lock(hcd->bandwidth_mutex);
			xhci_change_max_exit_latency(xhci, udev, 0);
			mutex_unlock(hcd->bandwidth_mutex);
			return 0;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

/* Check if a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
 */
static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
					   unsigned capability)
{
	u32 port_offset, port_count;
	int i;

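	/* e.g. (illustrative values): a cached capability entry with port
	 * offset 1 and port count 4 covers zero-based ports 0-3, so port 2
	 * matches below and port 4 does not.
	 */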
	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			/* port offsets start at 1 */
			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
			if (port >= port_offset &&
			    port < port_offset + port_count)
				return 1;
		}
	}
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int portnum = udev->portnum - 1;

	if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return 0;

	/* So far we only support LPM for non-hub devices connected
	 * directly to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return 0;

	if (xhci->hw_lpm_support == 1 &&
			xhci_check_usb2_port_capability(
				xhci, portnum, XHCI_HLC)) {
		udev->usb2_hw_lpm_capable = 1;
		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
		udev->l1_params.besl = XHCI_DEFAULT_BESL;
		if (xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_BLC))
			udev->usb2_hw_lpm_besl_capable = 1;
	}

	return 0;
}

/*---------------------- USB 3.0 Link PM functions ------------------------*/

/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
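/* e.g. (illustrative value): bInterval = 4 gives 2^3 * 125 us = 1 ms,
 * i.e. a return value of 1000000 ns.
 */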
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}

static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}

/* The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
static unsigned long long xhci_calculate_intel_u1_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

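	/* e.g. (illustrative values): a bulk endpoint with U1 SEL = 100000 ns
	 * yields 500000 ns; an isoc endpoint with bInterval = 4 (1 ms service
	 * interval) yields max(1050000, 200000) = 1050000 ns.
	 */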
	return timeout_ns;
}

/* Returns the hub-encoded U1 timeout value. */
static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	if (xhci->quirks & XHCI_INTEL_HOST)
		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
	else
		timeout_ns = udev->u1_params.sel;

	/* The U1 timeout is encoded in 1us intervals.
	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
	 */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns = 1;
	else
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}

/* The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals).
 *  - the U2 Exit Latency of the device
 */
static unsigned long long xhci_calculate_intel_u2_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	return timeout_ns;
}

/* Returns the hub-encoded U2 timeout value. */
static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	if (xhci->quirks & XHCI_INTEL_HOST)
		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
	else
		timeout_ns = udev->u2_params.sel;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
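	/* e.g. (illustrative value): 10 ms encodes as
	 * DIV_ROUND_UP(10000000, 256000) = 40, i.e. 40 * 256 us; anything
	 * above USB3_LPM_U2_MAX_TIMEOUT (0xFE, ~65 ms) disables
	 * hub-initiated U2 below.
	 */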
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu (in 256 us units)\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}

static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1)
		return xhci_calculate_u1_timeout(xhci, udev, desc);
	else if (state == USB3_LPM_U2)
		return xhci_calculate_u2_timeout(xhci, udev, desc);

	return USB3_LPM_DISABLED;
}

static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
			desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}

static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
					&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}

static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

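	/* e.g. a device plugged straight into the root hub never enters the
	 * loop above (num_hubs = 0) and U1 stays allowed; one behind two
	 * external hubs counts num_hubs = 2 and is rejected below.
	 */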
	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	else
		return 0;
}

/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;
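	/* e.g. (illustrative values): enabling U1 with u1_params.mel = 3500 ns
	 * while U2 stays disabled gives
	 * mel_us = DIV_ROUND_UP(3500, 1000) = 4 us.
	 */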
	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * refer to section 6.2.2: MTT should be 0 for full speed hub,
	 * but it may already be set to 1 when an xHCI virtual device is
	 * set up, so clear it anyway.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
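		/* e.g. (illustrative value): a TT think time of 2000 ns maps
		 * to (2000 / 666) - 1 = 2, i.e. 24 FS bit times.
		 */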
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
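	/* MFINDEX counts 125 us microframes; shifting right by 3 divides by 8
	 * to give the 1 ms frame number (e.g. index 800 is frame 100).
	 */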
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support building packets from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		if (xhci->sbrn == 0x31) {
			xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
			hcd->speed = HCD_USB31;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		}
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
	xhci_print_registers(xhci);

	xhci->quirks = quirks;

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk will ignore such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1. However, these xHCs don't actually
	 * support 64-bit address memory pointers. So this driver clears
	 * the AC64 bit of xhci->hcc_params so that dma_set_coherent_mask(dev,
	 * DMA_BIT_MASK(32)) is called below in this xhci_gen_setup().
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%08x\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
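	/* e.g. the doorbell array below is 256 32-bit registers,
	 * so 256*32/8 == 1024 bytes.
	 */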
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void) { }

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);