/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: the "usec" timeout has
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
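
/*
 * Typical use, as in xhci_halt() below: wait up to XHCI_MAX_HALT_USEC for
 * the HCHalted status bit to be set after the run/stop bit is cleared:
 *
 *	handshake(xhci, &xhci->op_regs->status,
 *		  STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */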

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
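
/*
 * In other words (assuming XHCI_IRQS covers the command register's
 * interrupt-enable bits, e.g. CMD_EIE | CMD_HSEIE | CMD_EWE in xhci.h):
 * always clear the interrupt enables, and additionally clear CMD_RUN if
 * the controller has not already halted.
 */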

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
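 *
 * (XHCI_MAX_HALT_USEC, used below, is expected to correspond to this 16 ms
 * bound, i.e. 16 * 1000 microseconds.)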
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all IRQs that have been requested
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   based on the number of interrupters in the xHCI HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per online CPU core.
	 *   Add one additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
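
	/*
	 * For example (illustrative arithmetic): on a 4-core system whose
	 * HCSPARAMS1 advertises 8 interrupters, this yields
	 * min(4 + 1, 8) = 5 MSI-X vectors.
	 */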

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts. Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = xhci_readl(xhci, xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
					i + 1);
			xhci_dbg(xhci, "Attempting Recovery routine!\n");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode.
 * The quirk creates a timer that polls the link state of each host controller
 * port every 2 seconds and recovers a port by issuing a Warm Reset if
 * Compliance Mode is detected; otherwise the port would become "dead" (no
 * device connections or disconnections would be detected anymore). Because no
 * status event is generated when entering compliance mode (per the xHCI spec),
 * this quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	init_timer(&xhci->comp_mode_recovery_timer);

	xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
	xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	/* Initialize Compliance Mode Recovery Data if needed */
	if (compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Set up MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called). So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	/* Delete the Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci))))
		del_timer_sync(&xhci->comp_mode_recovery_timer);

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
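 *
 * (A sketch of the arithmetic, assuming CMD_RING_RSVD_BITS covers the low
 * 6 bits of the register: only 64-byte-aligned addresses are expressible
 * there, while TRBs are 16 bytes each, so an arbitrary mid-ring TRB address
 * usually can't be programmed directly.)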
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped; we assume port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Delete the Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

 done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}

	/*
	 * If the system is subject to the Quirk, the Compliance Mode Timer
	 * needs to be re-initialized after every system resume, since the
	 * ports are again subject to the Compliance Mode issue. It doesn't
	 * matter whether the ports had previously entered U0 before the
	 * system was suspended.
	 */
	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
		compliance_mode_recovery_timer_init(xhci);

	return retval;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the
 * core and HCDs. Find the index for an endpoint given its descriptor. Use the
 * return value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
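 *
 * Worked examples: ep 1 OUT (0x01) gives (1 * 2) + 0 - 1 = 1; ep 1 IN
 * (0x81) gives (1 * 2) + 1 - 1 = 2; the default control ep 0 gives 0.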
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
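 * Example: ep 1 IN has endpoint index 2 (see above), so its flag is
 * 1 << (2 + 1) = 0x8.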
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with a udev that "
					"does not match the virt_dev\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are several cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists. We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
1731
1732static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1733{
1734 struct xhci_input_control_ctx *ctrl_ctx;
1735 struct xhci_ep_ctx *ep_ctx;
1736 struct xhci_slot_ctx *slot_ctx;
1737 int i;
1738
1739 /* When a device's add flag and drop flag are zero, any subsequent
1740 * configure endpoint command will leave that endpoint's state
1741 * untouched. Make sure we don't leave any old state in the input
1742 * endpoint contexts.
1743 */
1744 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1745 ctrl_ctx->drop_flags = 0;
1746 ctrl_ctx->add_flags = 0;
1747 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1748 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1749 /* Endpoint 0 is always valid */
1750 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1751 for (i = 1; i < 31; ++i) {
1752 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1753 ep_ctx->ep_info = 0;
1754 ep_ctx->ep_info2 = 0;
1755 ep_ctx->deq = 0;
1756 ep_ctx->tx_info = 0;
1757 }
1758}
1759
1760static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1761 struct usb_device *udev, u32 *cmd_status)
1762{
1763 int ret;
1764
1765 switch (*cmd_status) {
1766 case COMP_ENOMEM:
1767 dev_warn(&udev->dev, "Not enough host controller resources "
1768 "for new device state.\n");
1769 ret = -ENOMEM;
1770 /* FIXME: can we allocate more resources for the HC? */
1771 break;
1772 case COMP_BW_ERR:
1773 case COMP_2ND_BW_ERR:
1774 dev_warn(&udev->dev, "Not enough bandwidth "
1775 "for new device state.\n");
1776 ret = -ENOSPC;
1777 /* FIXME: can we go back to the old state? */
1778 break;
1779 case COMP_TRB_ERR:
1780 /* the HCD set up something wrong */
1781 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1782 "add flag = 1, "
1783 "and endpoint is not disabled.\n");
1784 ret = -EINVAL;
1785 break;
1786 case COMP_DEV_ERR:
1787 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1788 "configure command.\n");
1789 ret = -ENODEV;
1790 break;
1791 case COMP_SUCCESS:
1792 dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1793 ret = 0;
1794 break;
1795 default:
1796 xhci_err(xhci, "ERROR: unexpected command completion "
1797 "code 0x%x.\n", *cmd_status);
1798 ret = -EINVAL;
1799 break;
1800 }
1801 return ret;
1802}
1803
1804static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1805 struct usb_device *udev, u32 *cmd_status)
1806{
1807 int ret;
1808 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1809
1810 switch (*cmd_status) {
1811 case COMP_EINVAL:
1812 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1813 "context command.\n");
1814 ret = -EINVAL;
1815 break;
1816 case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
1819 case COMP_CTX_STATE:
1820 dev_warn(&udev->dev, "WARN: invalid context state for "
1821 "evaluate context command.\n");
1822 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1823 ret = -EINVAL;
1824 break;
1825 case COMP_DEV_ERR:
1826 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1827 "context command.\n");
1828 ret = -ENODEV;
1829 break;
1830 case COMP_MEL_ERR:
1831 /* Max Exit Latency too large error */
1832 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1833 ret = -EINVAL;
1834 break;
1835 case COMP_SUCCESS:
1836 dev_dbg(&udev->dev, "Successful evaluate context command\n");
1837 ret = 0;
1838 break;
1839 default:
1840 xhci_err(xhci, "ERROR: unexpected command completion "
1841 "code 0x%x.\n", *cmd_status);
1842 ret = -EINVAL;
1843 break;
1844 }
1845 return ret;
1846}
1847
1848static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1849 struct xhci_container_ctx *in_ctx)
1850{
1851 struct xhci_input_control_ctx *ctrl_ctx;
1852 u32 valid_add_flags;
1853 u32 valid_drop_flags;
1854
1855 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1856 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1857 * (bit 1). The default control endpoint is added during the Address
1858 * Device command and is never removed until the slot is disabled.
1859 */
1860 valid_add_flags = ctrl_ctx->add_flags >> 2;
1861 valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1862
1863 /* Use hweight32 to count the number of ones in the add flags, or
1864 * number of endpoints added. Don't count endpoints that are changed
1865 * (both added and dropped).
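	 *
	 * For example, if valid_add_flags = 0x6 and valid_drop_flags = 0x2,
	 * one endpoint is changed (both added and dropped) and one is purely
	 * new, so this returns hweight32(0x6) - hweight32(0x2) = 2 - 1 = 1.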
1866 */
1867 return hweight32(valid_add_flags) -
1868 hweight32(valid_add_flags & valid_drop_flags);
1869}
1870
1871static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1872 struct xhci_container_ctx *in_ctx)
1873{
1874 struct xhci_input_control_ctx *ctrl_ctx;
1875 u32 valid_add_flags;
1876 u32 valid_drop_flags;
1877
1878 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1879 valid_add_flags = ctrl_ctx->add_flags >> 2;
1880 valid_drop_flags = ctrl_ctx->drop_flags >> 2;
1881
1882 return hweight32(valid_drop_flags) -
1883 hweight32(valid_add_flags & valid_drop_flags);
1884}
1885
1886/*
1887 * We need to reserve the new number of endpoints before the configure endpoint
1888 * command completes. We can't subtract the dropped endpoints from the number
1889 * of active endpoints until the command completes because we can oversubscribe
1890 * the host in this case:
1891 *
1892 * - the first configure endpoint command drops more endpoints than it adds
1893 * - a second configure endpoint command that adds more endpoints is queued
1894 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
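 *
 * A concrete example with made-up numbers: with a limit of 32 endpoint
 * contexts and 30 in use, a command that drops 4 endpoints and adds 2 new
 * ones must still reserve 2 more contexts up front, because the 4 drops
 * only become certain once the command succeeds.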
1896 *
1897 * Must be called with xhci->lock held.
1898 */
1899static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1900 struct xhci_container_ctx *in_ctx)
1901{
1902 u32 added_eps;
1903
1904 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1905 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1906 xhci_dbg(xhci, "Not enough ep ctxs: "
1907 "%u active, need to add %u, limit is %u.\n",
1908 xhci->num_active_eps, added_eps,
1909 xhci->limit_active_eps);
1910 return -ENOMEM;
1911 }
1912 xhci->num_active_eps += added_eps;
1913 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1914 xhci->num_active_eps);
1915 return 0;
1916}
1917
1918/*
 * The xHC failed the configure endpoint command for some other reason, so we
 * need to revert the resources that the failed configuration would have used.
1921 *
1922 * Must be called with xhci->lock held.
1923 */
1924static void xhci_free_host_resources(struct xhci_hcd *xhci,
1925 struct xhci_container_ctx *in_ctx)
1926{
1927 u32 num_failed_eps;
1928
1929 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1930 xhci->num_active_eps -= num_failed_eps;
1931 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
1932 num_failed_eps,
1933 xhci->num_active_eps);
1934}
1935
1936/*
1937 * Now that the command has completed, clean up the active endpoint count by
1938 * subtracting out the endpoints that were dropped (but not changed).
1939 *
1940 * Must be called with xhci->lock held.
1941 */
1942static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1943 struct xhci_container_ctx *in_ctx)
1944{
1945 u32 num_dropped_eps;
1946
1947 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
1948 xhci->num_active_eps -= num_dropped_eps;
1949 if (num_dropped_eps)
1950 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
1951 num_dropped_eps,
1952 xhci->num_active_eps);
1953}
1954
1955unsigned int xhci_get_block_size(struct usb_device *udev)
1956{
1957 switch (udev->speed) {
1958 case USB_SPEED_LOW:
1959 case USB_SPEED_FULL:
1960 return FS_BLOCK;
1961 case USB_SPEED_HIGH:
1962 return HS_BLOCK;
1963 case USB_SPEED_SUPER:
1964 return SS_BLOCK;
1965 case USB_SPEED_UNKNOWN:
1966 case USB_SPEED_WIRELESS:
1967 default:
1968 /* Should never happen */
1969 return 1;
1970 }
1971}
1972
1973unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1974{
1975 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
1976 return LS_OVERHEAD;
1977 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
1978 return FS_OVERHEAD;
1979 return HS_OVERHEAD;
1980}
1981
1982/* If we are changing a LS/FS device under a HS hub,
1983 * make sure (if we are activating a new TT) that the HS bus has enough
1984 * bandwidth for this new TT.
1985 */
1986static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
1987 struct xhci_virt_device *virt_dev,
1988 int old_active_eps)
1989{
1990 struct xhci_interval_bw_table *bw_table;
1991 struct xhci_tt_bw_info *tt_info;
1992
1993 /* Find the bandwidth table for the root port this TT is attached to. */
1994 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
1995 tt_info = virt_dev->tt_info;
1996 /* If this TT already had active endpoints, the bandwidth for this TT
1997 * has already been added. Removing all periodic endpoints (and thus
 * making the TT inactive) will only decrease the bandwidth used.
1999 */
2000 if (old_active_eps)
2001 return 0;
2002 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2003 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2004 return -ENOMEM;
2005 return 0;
2006 }
2007 /* Not sure why we would have no new active endpoints...
2008 *
2009 * Maybe because of an Evaluate Context change for a hub update or a
2010 * control endpoint 0 max packet size change?
2011 * FIXME: skip the bandwidth calculation in that case.
2012 */
2013 return 0;
2014}
2015
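/*
 * Check that the SuperSpeed bandwidth, minus the driver's reserved
 * percentage, can still absorb this device's periodic traffic in each
 * direction. With illustrative numbers: reserving 10% of a 5000-block
 * limit leaves 4500 blocks, and the check fails once the IN or OUT total
 * would exceed that.
 */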
2016static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2017 struct xhci_virt_device *virt_dev)
2018{
2019 unsigned int bw_reserved;
2020
2021 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2022 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2023 return -ENOMEM;
2024
2025 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2026 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2027 return -ENOMEM;
2028
2029 return 0;
2030}
2031
2032/*
2033 * This algorithm is a very conservative estimate of the worst-case scheduling
2034 * scenario for any one interval. The hardware dynamically schedules the
2035 * packets, so we can't tell which microframe could be the limiting factor in
2036 * the bandwidth scheduling. This only takes into account periodic endpoints.
2037 *
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2039 * case scenario. Instead, we come up with an estimate that is no less than
2040 * the worst case bandwidth used for any one microframe, but may be an
2041 * over-estimate.
2042 *
2043 * We walk the requirements for each endpoint by interval, starting with the
2044 * smallest interval, and place packets in the schedule where there is only one
2045 * possible way to schedule packets for that interval. In order to simplify
2046 * this algorithm, we record the largest max packet size for each interval, and
2047 * assume all packets will be that size.
2048 *
 * For interval 0, we obviously must schedule all packets in every microframe.
2050 * The bandwidth for interval 0 is just the amount of data to be transmitted
2051 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2052 * the number of packets).
2053 *
2054 * For interval 1, we have two possible microframes to schedule those packets
2055 * in. For this algorithm, if we can schedule the same number of packets for
2056 * each possible scheduling opportunity (each microframe), we will do so. The
2057 * remaining number of packets will be saved to be transmitted in the gaps in
2058 * the next interval's scheduling sequence.
2059 *
2060 * As we move those remaining packets to be scheduled with interval 2 packets,
2061 * we have to double the number of remaining packets to transmit. This is
2062 * because the intervals are actually powers of 2, and we would be transmitting
2063 * the previous interval's packets twice in this interval. We also have to be
2064 * sure that when we look at the largest max packet size for this interval, we
2065 * also look at the largest max packet size for the remaining packets and take
2066 * the greater of the two.
2067 *
2068 * The algorithm continues to evenly distribute packets in each scheduling
2069 * opportunity, and push the remaining packets out, until we get to the last
2070 * interval. Then those packets and their associated overhead are just added
2071 * to the bandwidth used.
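 *
 * A worked example with made-up numbers: suppose interval 1 (a transfer
 * every two microframes) has 5 packets queued and nothing else is
 * scheduled. There are 1 << (1 + 1) = 4 scheduling opportunities, so
 * 5 >> 2 = 1 packet is charged to every opportunity and 5 % 4 = 1 packet
 * is carried over. At interval 2 that carry-over doubles to 2 packets,
 * which still can't be spread evenly over 8 opportunities, so it keeps
 * doubling in step with the opportunities and is finally charged as if it
 * were transmitted in every microframe.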
2072 */
2073static int xhci_check_bw_table(struct xhci_hcd *xhci,
2074 struct xhci_virt_device *virt_dev,
2075 int old_active_eps)
2076{
2077 unsigned int bw_reserved;
2078 unsigned int max_bandwidth;
2079 unsigned int bw_used;
2080 unsigned int block_size;
2081 struct xhci_interval_bw_table *bw_table;
2082 unsigned int packet_size = 0;
2083 unsigned int overhead = 0;
2084 unsigned int packets_transmitted = 0;
2085 unsigned int packets_remaining = 0;
2086 unsigned int i;
2087
2088 if (virt_dev->udev->speed == USB_SPEED_SUPER)
2089 return xhci_check_ss_bw(xhci, virt_dev);
2090
2091 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2092 max_bandwidth = HS_BW_LIMIT;
2093 /* Convert percent of bus BW reserved to blocks reserved */
2094 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2095 } else {
2096 max_bandwidth = FS_BW_LIMIT;
2097 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2098 }
2099
2100 bw_table = virt_dev->bw_table;
2101 /* We need to translate the max packet size and max ESIT payloads into
2102 * the units the hardware uses.
2103 */
2104 block_size = xhci_get_block_size(virt_dev->udev);
2105
2106 /* If we are manipulating a LS/FS device under a HS hub, double check
 * that the HS bus has enough bandwidth if we are activating a new TT.
2108 */
2109 if (virt_dev->tt_info) {
2110 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2111 virt_dev->real_port);
2112 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2113 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2114 "newly activated TT.\n");
2115 return -ENOMEM;
2116 }
2117 xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
2118 virt_dev->tt_info->slot_id,
2119 virt_dev->tt_info->ttport);
2120 } else {
2121 xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2122 virt_dev->real_port);
2123 }
2124
2125 /* Add in how much bandwidth will be used for interval zero, or the
2126 * rounded max ESIT payload + number of packets * largest overhead.
2127 */
2128 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2129 bw_table->interval_bw[0].num_packets *
2130 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2131
2132 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2133 unsigned int bw_added;
2134 unsigned int largest_mps;
2135 unsigned int interval_overhead;
2136
2137 /*
2138 * How many packets could we transmit in this interval?
2139 * If packets didn't fit in the previous interval, we will need
2140 * to transmit that many packets twice within this interval.
2141 */
2142 packets_remaining = 2 * packets_remaining +
2143 bw_table->interval_bw[i].num_packets;
2144
2145 /* Find the largest max packet size of this or the previous
2146 * interval.
2147 */
2148 if (list_empty(&bw_table->interval_bw[i].endpoints))
2149 largest_mps = 0;
2150 else {
2151 struct xhci_virt_ep *virt_ep;
2152 struct list_head *ep_entry;
2153
2154 ep_entry = bw_table->interval_bw[i].endpoints.next;
2155 virt_ep = list_entry(ep_entry,
2156 struct xhci_virt_ep, bw_endpoint_list);
2157 /* Convert to blocks, rounding up */
2158 largest_mps = DIV_ROUND_UP(
2159 virt_ep->bw_info.max_packet_size,
2160 block_size);
2161 }
2162 if (largest_mps > packet_size)
2163 packet_size = largest_mps;
2164
2165 /* Use the larger overhead of this or the previous interval. */
2166 interval_overhead = xhci_get_largest_overhead(
2167 &bw_table->interval_bw[i]);
2168 if (interval_overhead > overhead)
2169 overhead = interval_overhead;
2170
2171 /* How many packets can we evenly distribute across
2172 * (1 << (i + 1)) possible scheduling opportunities?
2173 */
2174 packets_transmitted = packets_remaining >> (i + 1);
2175
2176 /* Add in the bandwidth used for those scheduled packets */
2177 bw_added = packets_transmitted * (overhead + packet_size);
2178
2179 /* How many packets do we have remaining to transmit? */
2180 packets_remaining = packets_remaining % (1 << (i + 1));
2181
2182 /* What largest max packet size should those packets have? */
2183 /* If we've transmitted all packets, don't carry over the
2184 * largest packet size.
2185 */
2186 if (packets_remaining == 0) {
2187 packet_size = 0;
2188 overhead = 0;
2189 } else if (packets_transmitted > 0) {
2190 /* Otherwise if we do have remaining packets, and we've
2191 * scheduled some packets in this interval, take the
2192 * largest max packet size from endpoints with this
2193 * interval.
2194 */
2195 packet_size = largest_mps;
2196 overhead = interval_overhead;
2197 }
2198 /* Otherwise carry over packet_size and overhead from the last
2199 * time we had a remainder.
2200 */
2201 bw_used += bw_added;
2202 if (bw_used > max_bandwidth) {
2203 xhci_warn(xhci, "Not enough bandwidth. "
2204 "Proposed: %u, Max: %u\n",
2205 bw_used, max_bandwidth);
2206 return -ENOMEM;
2207 }
2208 }
2209 /*
2210 * Ok, we know we have some packets left over after even-handedly
2211 * scheduling interval 15. We don't know which microframes they will
2212 * fit into, so we over-schedule and say they will be scheduled every
2213 * microframe.
2214 */
2215 if (packets_remaining > 0)
2216 bw_used += overhead + packet_size;
2217
2218 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2219 unsigned int port_index = virt_dev->real_port - 1;
2220
2221 /* OK, we're manipulating a HS device attached to a
2222 * root port bandwidth domain. Include the number of active TTs
2223 * in the bandwidth used.
2224 */
2225 bw_used += TT_HS_OVERHEAD *
2226 xhci->rh_bw[port_index].num_active_tts;
2227 }
2228
2229 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2230 "Available: %u " "percent\n",
2231 bw_used, max_bandwidth, bw_reserved,
2232 (max_bandwidth - bw_used - bw_reserved) * 100 /
2233 max_bandwidth);
2234
2235 bw_used += bw_reserved;
2236 if (bw_used > max_bandwidth) {
2237 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2238 bw_used, max_bandwidth);
2239 return -ENOMEM;
2240 }
2241
2242 bw_table->bw_used = bw_used;
2243 return 0;
2244}
2245
2246static bool xhci_is_async_ep(unsigned int ep_type)
2247{
2248 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2249 ep_type != ISOC_IN_EP &&
2250 ep_type != INT_IN_EP);
2251}
2252
2253static bool xhci_is_sync_in_ep(unsigned int ep_type)
2254{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2256}
2257
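/*
 * Estimate the SuperSpeed bandwidth an endpoint consumes, in blocks.
 * An endpoint serviced every microframe (ep_interval == 0) is charged the
 * burst overhead once, plus per-packet overhead and payload for each of
 * its mult * num_packets packets. A periodic endpoint instead has its
 * whole per-service cost averaged over the 1 << ep_interval microframes
 * between services, rounded up so we never under-reserve.
 */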
2258static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2259{
2260 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2261
2262 if (ep_bw->ep_interval == 0)
2263 return SS_OVERHEAD_BURST +
2264 (ep_bw->mult * ep_bw->num_packets *
2265 (SS_OVERHEAD + mps));
2266 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2267 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2268 1 << ep_bw->ep_interval);
2269
2270}
2271
2272void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2273 struct xhci_bw_info *ep_bw,
2274 struct xhci_interval_bw_table *bw_table,
2275 struct usb_device *udev,
2276 struct xhci_virt_ep *virt_ep,
2277 struct xhci_tt_bw_info *tt_info)
2278{
2279 struct xhci_interval_bw *interval_bw;
2280 int normalized_interval;
2281
2282 if (xhci_is_async_ep(ep_bw->type))
2283 return;
2284
2285 if (udev->speed == USB_SPEED_SUPER) {
2286 if (xhci_is_sync_in_ep(ep_bw->type))
2287 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2288 xhci_get_ss_bw_consumed(ep_bw);
2289 else
2290 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2291 xhci_get_ss_bw_consumed(ep_bw);
2292 return;
2293 }
2294
2295 /* SuperSpeed endpoints never get added to intervals in the table, so
2296 * this check is only valid for HS/FS/LS devices.
2297 */
2298 if (list_empty(&virt_ep->bw_endpoint_list))
2299 return;
2300 /* For LS/FS devices, we need to translate the interval expressed in
2301 * microframes to frames.
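	 * Since both are stored as powers of two, dividing by the 8
	 * microframes per frame means subtracting 3 from the exponent: for
	 * example, an ep_interval of 3 (2^3 = 8 microframes, i.e. one frame)
	 * becomes normalized_interval 0.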
2302 */
2303 if (udev->speed == USB_SPEED_HIGH)
2304 normalized_interval = ep_bw->ep_interval;
2305 else
2306 normalized_interval = ep_bw->ep_interval - 3;
2307
2308 if (normalized_interval == 0)
2309 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2310 interval_bw = &bw_table->interval_bw[normalized_interval];
2311 interval_bw->num_packets -= ep_bw->num_packets;
2312 switch (udev->speed) {
2313 case USB_SPEED_LOW:
2314 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2315 break;
2316 case USB_SPEED_FULL:
2317 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2318 break;
2319 case USB_SPEED_HIGH:
2320 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2321 break;
2322 case USB_SPEED_SUPER:
2323 case USB_SPEED_UNKNOWN:
2324 case USB_SPEED_WIRELESS:
2325 /* Should never happen because only LS/FS/HS endpoints will get
2326 * added to the endpoint list.
2327 */
2328 return;
2329 }
2330 if (tt_info)
2331 tt_info->active_eps -= 1;
2332 list_del_init(&virt_ep->bw_endpoint_list);
2333}
2334
2335static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2336 struct xhci_bw_info *ep_bw,
2337 struct xhci_interval_bw_table *bw_table,
2338 struct usb_device *udev,
2339 struct xhci_virt_ep *virt_ep,
2340 struct xhci_tt_bw_info *tt_info)
2341{
2342 struct xhci_interval_bw *interval_bw;
2343 struct xhci_virt_ep *smaller_ep;
2344 int normalized_interval;
2345
2346 if (xhci_is_async_ep(ep_bw->type))
2347 return;
2348
2349 if (udev->speed == USB_SPEED_SUPER) {
2350 if (xhci_is_sync_in_ep(ep_bw->type))
2351 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2352 xhci_get_ss_bw_consumed(ep_bw);
2353 else
2354 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2355 xhci_get_ss_bw_consumed(ep_bw);
2356 return;
2357 }
2358
2359 /* For LS/FS devices, we need to translate the interval expressed in
2360 * microframes to frames.
2361 */
2362 if (udev->speed == USB_SPEED_HIGH)
2363 normalized_interval = ep_bw->ep_interval;
2364 else
2365 normalized_interval = ep_bw->ep_interval - 3;
2366
2367 if (normalized_interval == 0)
2368 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2369 interval_bw = &bw_table->interval_bw[normalized_interval];
2370 interval_bw->num_packets += ep_bw->num_packets;
2371 switch (udev->speed) {
2372 case USB_SPEED_LOW:
2373 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2374 break;
2375 case USB_SPEED_FULL:
2376 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2377 break;
2378 case USB_SPEED_HIGH:
2379 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2380 break;
2381 case USB_SPEED_SUPER:
2382 case USB_SPEED_UNKNOWN:
2383 case USB_SPEED_WIRELESS:
2384 /* Should never happen because only LS/FS/HS endpoints will get
2385 * added to the endpoint list.
2386 */
2387 return;
2388 }
2389
2390 if (tt_info)
2391 tt_info->active_eps += 1;
2392 /* Insert the endpoint into the list, largest max packet size first. */
2393 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2394 bw_endpoint_list) {
2395 if (ep_bw->max_packet_size >=
2396 smaller_ep->bw_info.max_packet_size) {
2397 /* Add the new ep before the smaller endpoint */
2398 list_add_tail(&virt_ep->bw_endpoint_list,
2399 &smaller_ep->bw_endpoint_list);
2400 return;
2401 }
2402 }
2403 /* Add the new endpoint at the end of the list. */
2404 list_add_tail(&virt_ep->bw_endpoint_list,
2405 &interval_bw->endpoints);
2406}
2407
2408void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2409 struct xhci_virt_device *virt_dev,
2410 int old_active_eps)
2411{
2412 struct xhci_root_port_bw_info *rh_bw_info;
2413 if (!virt_dev->tt_info)
2414 return;
2415
2416 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2417 if (old_active_eps == 0 &&
2418 virt_dev->tt_info->active_eps != 0) {
2419 rh_bw_info->num_active_tts += 1;
2420 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2421 } else if (old_active_eps != 0 &&
2422 virt_dev->tt_info->active_eps == 0) {
2423 rh_bw_info->num_active_tts -= 1;
2424 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2425 }
2426}
2427
2428static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2429 struct xhci_virt_device *virt_dev,
2430 struct xhci_container_ctx *in_ctx)
2431{
2432 struct xhci_bw_info ep_bw_info[31];
2433 int i;
2434 struct xhci_input_control_ctx *ctrl_ctx;
2435 int old_active_eps = 0;
2436
2437 if (virt_dev->tt_info)
2438 old_active_eps = virt_dev->tt_info->active_eps;
2439
2440 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2441
2442 for (i = 0; i < 31; i++) {
2443 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2444 continue;
2445
2446 /* Make a copy of the BW info in case we need to revert this */
2447 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2448 sizeof(ep_bw_info[i]));
2449 /* Drop the endpoint from the interval table if the endpoint is
2450 * being dropped or changed.
2451 */
2452 if (EP_IS_DROPPED(ctrl_ctx, i))
2453 xhci_drop_ep_from_interval_table(xhci,
2454 &virt_dev->eps[i].bw_info,
2455 virt_dev->bw_table,
2456 virt_dev->udev,
2457 &virt_dev->eps[i],
2458 virt_dev->tt_info);
2459 }
2460 /* Overwrite the information stored in the endpoints' bw_info */
2461 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2462 for (i = 0; i < 31; i++) {
2463 /* Add any changed or added endpoints to the interval table */
2464 if (EP_IS_ADDED(ctrl_ctx, i))
2465 xhci_add_ep_to_interval_table(xhci,
2466 &virt_dev->eps[i].bw_info,
2467 virt_dev->bw_table,
2468 virt_dev->udev,
2469 &virt_dev->eps[i],
2470 virt_dev->tt_info);
2471 }
2472
2473 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2474 /* Ok, this fits in the bandwidth we have.
2475 * Update the number of active TTs.
2476 */
2477 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2478 return 0;
2479 }
2480
2481 /* We don't have enough bandwidth for this, revert the stored info. */
2482 for (i = 0; i < 31; i++) {
2483 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2484 continue;
2485
2486 /* Drop the new copies of any added or changed endpoints from
2487 * the interval table.
2488 */
2489 if (EP_IS_ADDED(ctrl_ctx, i)) {
2490 xhci_drop_ep_from_interval_table(xhci,
2491 &virt_dev->eps[i].bw_info,
2492 virt_dev->bw_table,
2493 virt_dev->udev,
2494 &virt_dev->eps[i],
2495 virt_dev->tt_info);
2496 }
2497 /* Revert the endpoint back to its old information */
2498 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2499 sizeof(ep_bw_info[i]));
2500 /* Add any changed or dropped endpoints back into the table */
2501 if (EP_IS_DROPPED(ctrl_ctx, i))
2502 xhci_add_ep_to_interval_table(xhci,
2503 &virt_dev->eps[i].bw_info,
2504 virt_dev->bw_table,
2505 virt_dev->udev,
2506 &virt_dev->eps[i],
2507 virt_dev->tt_info);
2508 }
2509 return -ENOMEM;
2510}
2511
2512
2513/* Issue a configure endpoint command or evaluate context command
2514 * and wait for it to finish.
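 *
 * The pattern below, in sketch form: reserve host resources and bandwidth
 * under xhci->lock, queue the command TRB and ring the doorbell, drop the
 * lock, then sleep on a completion that the command-event handler signals.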
2515 */
2516static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2517 struct usb_device *udev,
2518 struct xhci_command *command,
2519 bool ctx_change, bool must_succeed)
2520{
2521 int ret;
2522 int timeleft;
2523 unsigned long flags;
2524 struct xhci_container_ctx *in_ctx;
2525 struct completion *cmd_completion;
2526 u32 *cmd_status;
2527 struct xhci_virt_device *virt_dev;
2528 union xhci_trb *cmd_trb;
2529
2530 spin_lock_irqsave(&xhci->lock, flags);
2531 virt_dev = xhci->devs[udev->slot_id];
2532
2533 if (command)
2534 in_ctx = command->in_ctx;
2535 else
2536 in_ctx = virt_dev->in_ctx;
2537
2538 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2539 xhci_reserve_host_resources(xhci, in_ctx)) {
2540 spin_unlock_irqrestore(&xhci->lock, flags);
2541 xhci_warn(xhci, "Not enough host resources, "
2542 "active endpoint contexts = %u\n",
2543 xhci->num_active_eps);
2544 return -ENOMEM;
2545 }
2546 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2547 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2548 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2549 xhci_free_host_resources(xhci, in_ctx);
2550 spin_unlock_irqrestore(&xhci->lock, flags);
2551 xhci_warn(xhci, "Not enough bandwidth\n");
2552 return -ENOMEM;
2553 }
2554
2555 if (command) {
2556 cmd_completion = command->completion;
2557 cmd_status = &command->status;
2558 command->command_trb = xhci->cmd_ring->enqueue;
2559
		/* The enqueue pointer can be left pointing to the link TRB,
		 * so we must handle that case.
2562 */
2563 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2564 command->command_trb =
2565 xhci->cmd_ring->enq_seg->next->trbs;
2566
2567 list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2568 } else {
2569 cmd_completion = &virt_dev->cmd_completion;
2570 cmd_status = &virt_dev->cmd_status;
2571 }
2572 init_completion(cmd_completion);
2573
2574 cmd_trb = xhci->cmd_ring->dequeue;
2575 if (!ctx_change)
2576 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2577 udev->slot_id, must_succeed);
2578 else
2579 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
2580 udev->slot_id, must_succeed);
2581 if (ret < 0) {
2582 if (command)
2583 list_del(&command->cmd_list);
2584 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2585 xhci_free_host_resources(xhci, in_ctx);
2586 spin_unlock_irqrestore(&xhci->lock, flags);
2587 xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2588 return -ENOMEM;
2589 }
2590 xhci_ring_cmd_db(xhci);
2591 spin_unlock_irqrestore(&xhci->lock, flags);
2592
2593 /* Wait for the configure endpoint command to complete */
2594 timeleft = wait_for_completion_interruptible_timeout(
2595 cmd_completion,
2596 XHCI_CMD_DEFAULT_TIMEOUT);
2597 if (timeleft <= 0) {
2598 xhci_warn(xhci, "%s while waiting for %s command\n",
2599 timeleft == 0 ? "Timeout" : "Signal",
2600 ctx_change == 0 ?
2601 "configure endpoint" :
2602 "evaluate context");
2603 /* cancel the configure endpoint command */
2604 ret = xhci_cancel_cmd(xhci, command, cmd_trb);
2605 if (ret < 0)
2606 return ret;
2607 return -ETIME;
2608 }
2609
2610 if (!ctx_change)
2611 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2612 else
2613 ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2614
2615 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2616 spin_lock_irqsave(&xhci->lock, flags);
2617 /* If the command failed, remove the reserved resources.
2618 * Otherwise, clean up the estimate to include dropped eps.
2619 */
2620 if (ret)
2621 xhci_free_host_resources(xhci, in_ctx);
2622 else
2623 xhci_finish_resource_reservation(xhci, in_ctx);
2624 spin_unlock_irqrestore(&xhci->lock, flags);
2625 }
2626 return ret;
2627}
2628
2629/* Called after one or more calls to xhci_add_endpoint() or
2630 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2631 * to call xhci_reset_bandwidth().
2632 *
2633 * Since we are in the middle of changing either configuration or
2634 * installing a new alt setting, the USB core won't allow URBs to be
2635 * enqueued for any endpoint on the old config or interface. Nothing
2636 * else should be touching the xhci->devs[slot_id] structure, so we
2637 * don't need to take the xhci->lock for manipulating that.
2638 */
2639int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2640{
2641 int i;
2642 int ret = 0;
2643 struct xhci_hcd *xhci;
2644 struct xhci_virt_device *virt_dev;
2645 struct xhci_input_control_ctx *ctrl_ctx;
2646 struct xhci_slot_ctx *slot_ctx;
2647
2648 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2649 if (ret <= 0)
2650 return ret;
2651 xhci = hcd_to_xhci(hcd);
2652 if (xhci->xhc_state & XHCI_STATE_DYING)
2653 return -ENODEV;
2654
2655 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2656 virt_dev = xhci->devs[udev->slot_id];
2657
2658 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2659 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2660 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2661 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2662 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2663
	/* Don't issue the command if there are no endpoints to update. */
2665 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2666 ctrl_ctx->drop_flags == 0)
2667 return 0;
2668
2669 xhci_dbg(xhci, "New Input Control Context:\n");
2670 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2671 xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2672 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2673
2674 ret = xhci_configure_endpoint(xhci, udev, NULL,
2675 false, false);
2676 if (ret) {
		/* Caller should call reset_bandwidth() */
2678 return ret;
2679 }
2680
2681 xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2682 xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2683 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2684
2685 /* Free any rings that were dropped, but not changed. */
2686 for (i = 1; i < 31; ++i) {
2687 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2688 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
2689 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2690 }
2691 xhci_zero_in_ctx(xhci, virt_dev);
2692 /*
2693 * Install any rings for completely new endpoints or changed endpoints,
2694 * and free or cache any old rings from changed endpoints.
2695 */
2696 for (i = 1; i < 31; ++i) {
2697 if (!virt_dev->eps[i].new_ring)
2698 continue;
2699 /* Only cache or free the old ring if it exists.
2700 * It may not if this is the first add of an endpoint.
2701 */
		if (virt_dev->eps[i].ring)
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2705 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2706 virt_dev->eps[i].new_ring = NULL;
2707 }
2708
2709 return ret;
2710}
2711
2712void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2713{
2714 struct xhci_hcd *xhci;
2715 struct xhci_virt_device *virt_dev;
2716 int i, ret;
2717
2718 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2719 if (ret <= 0)
2720 return;
2721 xhci = hcd_to_xhci(hcd);
2722
2723 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2724 virt_dev = xhci->devs[udev->slot_id];
2725 /* Free any rings allocated for added endpoints */
2726 for (i = 0; i < 31; ++i) {
2727 if (virt_dev->eps[i].new_ring) {
2728 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2729 virt_dev->eps[i].new_ring = NULL;
2730 }
2731 }
2732 xhci_zero_in_ctx(xhci, virt_dev);
2733}
2734
2735static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2736 struct xhci_container_ctx *in_ctx,
2737 struct xhci_container_ctx *out_ctx,
2738 u32 add_flags, u32 drop_flags)
2739{
2740 struct xhci_input_control_ctx *ctrl_ctx;
2741 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2742 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2743 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2744 xhci_slot_copy(xhci, in_ctx, out_ctx);
2745 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2746
2747 xhci_dbg(xhci, "Input Context:\n");
2748 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2749}
2750
2751static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2752 unsigned int slot_id, unsigned int ep_index,
2753 struct xhci_dequeue_state *deq_state)
2754{
2755 struct xhci_container_ctx *in_ctx;
2756 struct xhci_ep_ctx *ep_ctx;
2757 u32 added_ctxs;
2758 dma_addr_t addr;
2759
2760 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2761 xhci->devs[slot_id]->out_ctx, ep_index);
2762 in_ctx = xhci->devs[slot_id]->in_ctx;
2763 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2764 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2765 deq_state->new_deq_ptr);
2766 if (addr == 0) {
2767 xhci_warn(xhci, "WARN Cannot submit config ep after "
2768 "reset ep command\n");
2769 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2770 deq_state->new_deq_seg,
2771 deq_state->new_deq_ptr);
2772 return;
2773 }
2774 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2775
2776 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2777 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2778 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2779}
2780
2781void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2782 struct usb_device *udev, unsigned int ep_index)
2783{
2784 struct xhci_dequeue_state deq_state;
2785 struct xhci_virt_ep *ep;
2786
2787 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2788 ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2789 /* We need to move the HW's dequeue pointer past this TD,
2790 * or it will attempt to resend it on the next doorbell ring.
2791 */
2792 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2793 ep_index, ep->stopped_stream, ep->stopped_td,
2794 &deq_state);
2795
2796 /* HW with the reset endpoint quirk will use the saved dequeue state to
2797 * issue a configure endpoint command later.
2798 */
2799 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2800 xhci_dbg(xhci, "Queueing new dequeue state\n");
2801 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2802 ep_index, ep->stopped_stream, &deq_state);
2803 } else {
2804 /* Better hope no one uses the input context between now and the
2805 * reset endpoint completion!
2806 * XXX: No idea how this hardware will react when stream rings
2807 * are enabled.
2808 */
2809 xhci_dbg(xhci, "Setting up input context for "
2810 "configure endpoint command\n");
2811 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2812 ep_index, &deq_state);
2813 }
2814}
2815
2816/* Deal with stalled endpoints. The core should have sent the control message
2817 * to clear the halt condition. However, we need to make the xHCI hardware
2818 * reset its sequence number, since a device will expect a sequence number of
2819 * zero after the halt condition is cleared.
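 *
 * A typical path, sketched: a driver sees -EPIPE on an URB and calls
 * usb_clear_halt(); on success the USB core calls back into the HCD's
 * endpoint_reset op (this function) so the host-side state is reset too.
 *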
2820 * Context: in_interrupt
2821 */
2822void xhci_endpoint_reset(struct usb_hcd *hcd,
2823 struct usb_host_endpoint *ep)
2824{
2825 struct xhci_hcd *xhci;
2826 struct usb_device *udev;
2827 unsigned int ep_index;
2828 unsigned long flags;
2829 int ret;
2830 struct xhci_virt_ep *virt_ep;
2831
2832 xhci = hcd_to_xhci(hcd);
2833 udev = (struct usb_device *) ep->hcpriv;
2834 /* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()).
2836 */
2837 if (!ep->hcpriv)
2838 return;
2839 ep_index = xhci_get_endpoint_index(&ep->desc);
2840 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2841 if (!virt_ep->stopped_td) {
2842 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2843 ep->desc.bEndpointAddress);
2844 return;
2845 }
2846 if (usb_endpoint_xfer_control(&ep->desc)) {
2847 xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2848 return;
2849 }
2850
2851 xhci_dbg(xhci, "Queueing reset endpoint command\n");
2852 spin_lock_irqsave(&xhci->lock, flags);
2853 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2854 /*
2855 * Can't change the ring dequeue pointer until it's transitioned to the
2856 * stopped state, which is only upon a successful reset endpoint
2857 * command. Better hope that last command worked!
2858 */
2859 if (!ret) {
2860 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2861 kfree(virt_ep->stopped_td);
2862 xhci_ring_cmd_db(xhci);
2863 }
2864 virt_ep->stopped_td = NULL;
2865 virt_ep->stopped_trb = NULL;
2866 virt_ep->stopped_stream = 0;
2867 spin_unlock_irqrestore(&xhci->lock, flags);
2868
2869 if (ret)
2870 xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2871}
2872
2873static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2874 struct usb_device *udev, struct usb_host_endpoint *ep,
2875 unsigned int slot_id)
2876{
2877 int ret;
2878 unsigned int ep_index;
2879 unsigned int ep_state;
2880
2881 if (!ep)
2882 return -EINVAL;
2883 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2884 if (ret <= 0)
2885 return -EINVAL;
2886 if (ep->ss_ep_comp.bmAttributes == 0) {
2887 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2888 " descriptor for ep 0x%x does not support streams\n",
2889 ep->desc.bEndpointAddress);
2890 return -EINVAL;
2891 }
2892
2893 ep_index = xhci_get_endpoint_index(&ep->desc);
2894 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2895 if (ep_state & EP_HAS_STREAMS ||
2896 ep_state & EP_GETTING_STREAMS) {
2897 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2898 "already has streams set up.\n",
2899 ep->desc.bEndpointAddress);
2900 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2901 "dynamic stream context array reallocation.\n");
2902 return -EINVAL;
2903 }
2904 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2905 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2906 "endpoint 0x%x; URBs are pending.\n",
2907 ep->desc.bEndpointAddress);
2908 return -EINVAL;
2909 }
2910 return 0;
2911}
2912
2913static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2914 unsigned int *num_streams, unsigned int *num_stream_ctxs)
2915{
2916 unsigned int max_streams;
2917
2918 /* The stream context array size must be a power of two */
2919 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
2920 /*
2921 * Find out how many primary stream array entries the host controller
2922 * supports. Later we may use secondary stream arrays (similar to 2nd
2923 * level page entries), but that's an optional feature for xHCI host
2924 * controllers. xHCs must support at least 4 stream IDs.
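	 *
	 * For example, a request for 5 streams needs a stream context array
	 * of roundup_pow_of_two(5) = 8 entries, which is then clamped to the
	 * controller's primary stream array limit if that limit is smaller.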
2925 */
2926 max_streams = HCC_MAX_PSA(xhci->hcc_params);
2927 if (*num_stream_ctxs > max_streams) {
2928 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2929 max_streams);
2930 *num_stream_ctxs = max_streams;
2931 *num_streams = max_streams;
2932 }
2933}
2934
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures; it only checks and gathers
2937 * information.
2938 */
2939static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2940 struct usb_device *udev,
2941 struct usb_host_endpoint **eps, unsigned int num_eps,
2942 unsigned int *num_streams, u32 *changed_ep_bitmask)
2943{
2944 unsigned int max_streams;
2945 unsigned int endpoint_flag;
2946 int i;
2947 int ret;
2948
2949 for (i = 0; i < num_eps; i++) {
2950 ret = xhci_check_streams_endpoint(xhci, udev,
2951 eps[i], udev->slot_id);
2952 if (ret < 0)
2953 return ret;
2954
2955 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
2956 if (max_streams < (*num_streams - 1)) {
2957 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2958 eps[i]->desc.bEndpointAddress,
2959 max_streams);
2960 *num_streams = max_streams+1;
2961 }
2962
2963 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2964 if (*changed_ep_bitmask & endpoint_flag)
2965 return -EINVAL;
2966 *changed_ep_bitmask |= endpoint_flag;
2967 }
2968 return 0;
2969}
2970
2971static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2972 struct usb_device *udev,
2973 struct usb_host_endpoint **eps, unsigned int num_eps)
2974{
2975 u32 changed_ep_bitmask = 0;
2976 unsigned int slot_id;
2977 unsigned int ep_index;
2978 unsigned int ep_state;
2979 int i;
2980
2981 slot_id = udev->slot_id;
2982 if (!xhci->devs[slot_id])
2983 return 0;
2984
2985 for (i = 0; i < num_eps; i++) {
2986 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
2987 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2988 /* Are streams already being freed for the endpoint? */
2989 if (ep_state & EP_GETTING_NO_STREAMS) {
2990 xhci_warn(xhci, "WARN Can't disable streams for "
2991 "endpoint 0x%x\n, "
2992 "streams are being disabled already.",
2993 eps[i]->desc.bEndpointAddress);
2994 return 0;
2995 }
2996 /* Are there actually any streams to free? */
2997 if (!(ep_state & EP_HAS_STREAMS) &&
2998 !(ep_state & EP_GETTING_STREAMS)) {
2999 xhci_warn(xhci, "WARN Can't disable streams for "
3000 "endpoint 0x%x\n, "
3001 "streams are already disabled!",
3002 eps[i]->desc.bEndpointAddress);
3003 xhci_warn(xhci, "WARN xhci_free_streams() called "
3004 "with non-streams endpoint\n");
3005 return 0;
3006 }
3007 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3008 }
3009 return changed_ep_bitmask;
3010}
3011
3012/*
 * The USB device drivers use this function (through the HCD interface in USB
3014 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3015 * coordinate mass storage command queueing across multiple endpoints (basically
3016 * a stream ID == a task ID).
3017 *
3018 * Setting up streams involves allocating the same size stream context array
3019 * for each endpoint and issuing a configure endpoint command for all endpoints.
3020 *
3021 * Don't allow the call to succeed if one endpoint only supports one stream
3022 * (which means it doesn't support streams at all).
3023 *
3024 * Drivers may get less stream IDs than they asked for, if the host controller
3025 * hardware or endpoints claim they can't support the number of requested
3026 * stream IDs.
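 *
 * A sketch of typical driver usage, via the USB core wrappers declared in
 * linux/usb.h rather than this routine directly (error handling omitted,
 * the stream count of 16 is arbitrary):
 *
 *	streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	... queue URBs with urb->stream_id set to 1..streams ...
 *	usb_free_streams(intf, eps, num_eps, GFP_NOIO);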
3027 */
3028int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3029 struct usb_host_endpoint **eps, unsigned int num_eps,
3030 unsigned int num_streams, gfp_t mem_flags)
3031{
3032 int i, ret;
3033 struct xhci_hcd *xhci;
3034 struct xhci_virt_device *vdev;
3035 struct xhci_command *config_cmd;
3036 unsigned int ep_index;
3037 unsigned int num_stream_ctxs;
3038 unsigned long flags;
3039 u32 changed_ep_bitmask = 0;
3040
3041 if (!eps)
3042 return -EINVAL;
3043
3044 /* Add one to the number of streams requested to account for
3045 * stream 0 that is reserved for xHCI usage.
3046 */
3047 num_streams += 1;
3048 xhci = hcd_to_xhci(hcd);
3049 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3050 num_streams);
3051
3052 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3053 if (!config_cmd) {
3054 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3055 return -ENOMEM;
3056 }
3057
3058 /* Check to make sure all endpoints are not already configured for
3059 * streams. While we're at it, find the maximum number of streams that
3060 * all the endpoints will support and check for duplicate endpoints.
3061 */
3062 spin_lock_irqsave(&xhci->lock, flags);
3063 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3064 num_eps, &num_streams, &changed_ep_bitmask);
3065 if (ret < 0) {
3066 xhci_free_command(xhci, config_cmd);
3067 spin_unlock_irqrestore(&xhci->lock, flags);
3068 return ret;
3069 }
3070 if (num_streams <= 1) {
3071 xhci_warn(xhci, "WARN: endpoints can't handle "
3072 "more than one stream.\n");
3073 xhci_free_command(xhci, config_cmd);
3074 spin_unlock_irqrestore(&xhci->lock, flags);
3075 return -EINVAL;
3076 }
3077 vdev = xhci->devs[udev->slot_id];
3078 /* Mark each endpoint as being in transition, so
3079 * xhci_urb_enqueue() will reject all URBs.
3080 */
3081 for (i = 0; i < num_eps; i++) {
3082 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3083 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3084 }
3085 spin_unlock_irqrestore(&xhci->lock, flags);
3086
3087 /* Setup internal data structures and allocate HW data structures for
3088 * streams (but don't install the HW structures in the input context
3089 * until we're sure all memory allocation succeeded).
3090 */
3091 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3092 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3093 num_stream_ctxs, num_streams);
3094
3095 for (i = 0; i < num_eps; i++) {
3096 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3097 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3098 num_stream_ctxs,
3099 num_streams, mem_flags);
3100 if (!vdev->eps[ep_index].stream_info)
3101 goto cleanup;
3102 /* Set maxPstreams in endpoint context and update deq ptr to
3103 * point to stream context array. FIXME
3104 */
3105 }
3106
3107 /* Set up the input context for a configure endpoint command. */
3108 for (i = 0; i < num_eps; i++) {
3109 struct xhci_ep_ctx *ep_ctx;
3110
3111 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3112 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3113
3114 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3115 vdev->out_ctx, ep_index);
3116 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3117 vdev->eps[ep_index].stream_info);
3118 }
3119 /* Tell the HW to drop its old copy of the endpoint context info
3120 * and add the updated copy from the input context.
3121 */
3122 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3123 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3124
3125 /* Issue and wait for the configure endpoint command */
3126 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3127 false, false);
3128
3129 /* xHC rejected the configure endpoint command for some reason, so we
3130 * leave the old ring intact and free our internal streams data
3131 * structure.
3132 */
3133 if (ret < 0)
3134 goto cleanup;
3135
3136 spin_lock_irqsave(&xhci->lock, flags);
3137 for (i = 0; i < num_eps; i++) {
3138 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3139 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3140 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3141 udev->slot_id, ep_index);
3142 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3143 }
3144 xhci_free_command(xhci, config_cmd);
3145 spin_unlock_irqrestore(&xhci->lock, flags);
3146
3147 /* Subtract 1 for stream 0, which drivers can't use */
3148 return num_streams - 1;
3149
3150cleanup:
3151 /* If it didn't work, free the streams! */
3152 for (i = 0; i < num_eps; i++) {
3153 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3154 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3155 vdev->eps[ep_index].stream_info = NULL;
3156 /* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal endpoint ring.
3158 */
3159 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3160 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3161 xhci_endpoint_zero(xhci, vdev, eps[i]);
3162 }
3163 xhci_free_command(xhci, config_cmd);
3164 return -ENOMEM;
3165}
3166
3167/* Transition the endpoint from using streams to being a "normal" endpoint
3168 * without streams.
3169 *
3170 * Modify the endpoint context state, submit a configure endpoint command,
3171 * and free all endpoint rings for streams if that completes successfully.
3172 */
3173int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3174 struct usb_host_endpoint **eps, unsigned int num_eps,
3175 gfp_t mem_flags)
3176{
3177 int i, ret;
3178 struct xhci_hcd *xhci;
3179 struct xhci_virt_device *vdev;
3180 struct xhci_command *command;
3181 unsigned int ep_index;
3182 unsigned long flags;
3183 u32 changed_ep_bitmask;
3184
3185 xhci = hcd_to_xhci(hcd);
3186 vdev = xhci->devs[udev->slot_id];
3187
3188 /* Set up a configure endpoint command to remove the streams rings */
3189 spin_lock_irqsave(&xhci->lock, flags);
3190 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3191 udev, eps, num_eps);
3192 if (changed_ep_bitmask == 0) {
3193 spin_unlock_irqrestore(&xhci->lock, flags);
3194 return -EINVAL;
3195 }
3196
3197 /* Use the xhci_command structure from the first endpoint. We may have
3198 * allocated too many, but the driver may call xhci_free_streams() for
3199 * each endpoint it grouped into one call to xhci_alloc_streams().
3200 */
3201 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3202 command = vdev->eps[ep_index].stream_info->free_streams_command;
3203 for (i = 0; i < num_eps; i++) {
3204 struct xhci_ep_ctx *ep_ctx;
3205
3206 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3207 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3208 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3209 EP_GETTING_NO_STREAMS;
3210
3211 xhci_endpoint_copy(xhci, command->in_ctx,
3212 vdev->out_ctx, ep_index);
3213 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3214 &vdev->eps[ep_index]);
3215 }
3216 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3217 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3218 spin_unlock_irqrestore(&xhci->lock, flags);
3219
3220 /* Issue and wait for the configure endpoint command,
3221 * which must succeed.
3222 */
3223 ret = xhci_configure_endpoint(xhci, udev, command,
3224 false, true);
3225
3226 /* xHC rejected the configure endpoint command for some reason, so we
3227 * leave the streams rings intact.
3228 */
3229 if (ret < 0)
3230 return ret;
3231
3232 spin_lock_irqsave(&xhci->lock, flags);
3233 for (i = 0; i < num_eps; i++) {
3234 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3235 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3236 vdev->eps[ep_index].stream_info = NULL;
3237 /* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal endpoint ring.
3239 */
3240 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3241 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3242 }
3243 spin_unlock_irqrestore(&xhci->lock, flags);
3244
3245 return 0;
3246}
3247
3248/*
3249 * Deletes endpoint resources for endpoints that were active before a Reset
3250 * Device command, or a Disable Slot command. The Reset Device command leaves
3251 * the control endpoint intact, whereas the Disable Slot command deletes it.
3252 *
3253 * Must be called with xhci->lock held.
3254 */
3255void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3256 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3257{
3258 int i;
3259 unsigned int num_dropped_eps = 0;
3260 unsigned int drop_flags = 0;
3261
3262 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3263 if (virt_dev->eps[i].ring) {
3264 drop_flags |= 1 << i;
3265 num_dropped_eps++;
3266 }
3267 }
3268 xhci->num_active_eps -= num_dropped_eps;
3269 if (num_dropped_eps)
3270 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3271 "%u now active.\n",
3272 num_dropped_eps, drop_flags,
3273 xhci->num_active_eps);
3274}
3275
3276/*
3277 * This submits a Reset Device Command, which will set the device state to 0,
3278 * set the device address to 0, and disable all the endpoints except the default
3279 * control endpoint. The USB core should come back and call
3280 * xhci_address_device(), and then re-set up the configuration. If this is
3281 * called because of a usb_reset_and_verify_device(), then the old alternate
3282 * settings will be re-installed through the normal bandwidth allocation
3283 * functions.
3284 *
3285 * Wait for the Reset Device command to finish. Remove all structures
3286 * associated with the endpoints that were disabled. Clear the input device
3287 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
3288 *
3289 * If the virt_dev to be reset does not exist or does not match the udev,
3290 * it means the device is lost, possibly due to the xHC restore error and
3291 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3292 * re-allocate the device.
3293 */
3294int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3295{
3296 int ret, i;
3297 unsigned long flags;
3298 struct xhci_hcd *xhci;
3299 unsigned int slot_id;
3300 struct xhci_virt_device *virt_dev;
3301 struct xhci_command *reset_device_cmd;
3302 int timeleft;
3303 int last_freed_endpoint;
3304 struct xhci_slot_ctx *slot_ctx;
3305 int old_active_eps = 0;
3306
3307 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3308 if (ret <= 0)
3309 return ret;
3310 xhci = hcd_to_xhci(hcd);
3311 slot_id = udev->slot_id;
3312 virt_dev = xhci->devs[slot_id];
3313 if (!virt_dev) {
3314 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3315 "not exist. Re-allocate the device\n", slot_id);
3316 ret = xhci_alloc_dev(hcd, udev);
3317 if (ret == 1)
3318 return 0;
3319 else
3320 return -EINVAL;
3321 }
3322
3323 if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
3325 * may belong to another udev.
3326 * Re-allocate the device.
3327 */
3328 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3329 "not match the udev. Re-allocate the device\n",
3330 slot_id);
3331 ret = xhci_alloc_dev(hcd, udev);
3332 if (ret == 1)
3333 return 0;
3334 else
3335 return -EINVAL;
3336 }
3337
3338 /* If device is not setup, there is no point in resetting it */
3339 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3340 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3341 SLOT_STATE_DISABLED)
3342 return 0;
3343
3344 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3345 /* Allocate the command structure that holds the struct completion.
3346 * Assume we're in process context, since the normal device reset
3347 * process has to wait for the device anyway. Storage devices are
3348 * reset as part of error handling, so use GFP_NOIO instead of
3349 * GFP_KERNEL.
3350 */
3351 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3352 if (!reset_device_cmd) {
3353 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3354 return -ENOMEM;
3355 }
3356
3357 /* Attempt to submit the Reset Device command to the command ring */
3358 spin_lock_irqsave(&xhci->lock, flags);
3359 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
3360
3361 /* Enqueue pointer can be left pointing to the link TRB,
3362 * we must handle that
3363 */
3364 if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3365 reset_device_cmd->command_trb =
3366 xhci->cmd_ring->enq_seg->next->trbs;
3367
3368 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3369 ret = xhci_queue_reset_device(xhci, slot_id);
3370 if (ret) {
3371 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3372 list_del(&reset_device_cmd->cmd_list);
3373 spin_unlock_irqrestore(&xhci->lock, flags);
3374 goto command_cleanup;
3375 }
3376 xhci_ring_cmd_db(xhci);
3377 spin_unlock_irqrestore(&xhci->lock, flags);
3378
3379 /* Wait for the Reset Device command to finish */
3380 timeleft = wait_for_completion_interruptible_timeout(
3381 reset_device_cmd->completion,
3382 USB_CTRL_SET_TIMEOUT);
3383 if (timeleft <= 0) {
3384 xhci_warn(xhci, "%s while waiting for reset device command\n",
3385 timeleft == 0 ? "Timeout" : "Signal");
3386 spin_lock_irqsave(&xhci->lock, flags);
3387 /* The timeout might have raced with the event ring handler, so
3388 * only delete from the list if the item isn't poisoned.
3389 */
3390 if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3391 list_del(&reset_device_cmd->cmd_list);
3392 spin_unlock_irqrestore(&xhci->lock, flags);
3393 ret = -ETIME;
3394 goto command_cleanup;
3395 }
3396
3397 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3398 * unless we tried to reset a slot ID that wasn't enabled,
3399 * or the device wasn't in the addressed or configured state.
3400 */
3401 ret = reset_device_cmd->status;
3402 switch (ret) {
3403 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3404 case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3405 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3406 slot_id,
3407 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3408 xhci_info(xhci, "Not freeing device rings.\n");
3409 /* Don't treat this as an error. May change my mind later. */
3410 ret = 0;
3411 goto command_cleanup;
3412 case COMP_SUCCESS:
3413 xhci_dbg(xhci, "Successful reset device command.\n");
3414 break;
3415 default:
3416 if (xhci_is_vendor_info_code(xhci, ret))
3417 break;
3418 xhci_warn(xhci, "Unknown completion code %u for "
3419 "reset device command.\n", ret);
3420 ret = -EINVAL;
3421 goto command_cleanup;
3422 }
3423
3424 /* Free up host controller endpoint resources */
3425 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3426 spin_lock_irqsave(&xhci->lock, flags);
3427 /* Don't delete the default control endpoint resources */
3428 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3429 spin_unlock_irqrestore(&xhci->lock, flags);
3430 }
3431
3432 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
3433 last_freed_endpoint = 1;
3434 for (i = 1; i < 31; ++i) {
3435 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3436
3437 if (ep->ep_state & EP_HAS_STREAMS) {
3438 xhci_free_stream_info(xhci, ep->stream_info);
3439 ep->stream_info = NULL;
3440 ep->ep_state &= ~EP_HAS_STREAMS;
3441 }
3442
3443 if (ep->ring) {
3444 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3445 last_freed_endpoint = i;
3446 }
3447 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3448 xhci_drop_ep_from_interval_table(xhci,
3449 &virt_dev->eps[i].bw_info,
3450 virt_dev->bw_table,
3451 udev,
3452 &virt_dev->eps[i],
3453 virt_dev->tt_info);
3454 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3455 }
3456 /* If necessary, update the number of active TTs on this root port */
3457 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3458
3459 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3460 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3461 ret = 0;
3462
3463command_cleanup:
3464 xhci_free_command(xhci, reset_device_cmd);
3465 return ret;
3466}
3467
3468/*
3469 * At this point, the struct usb_device is about to go away, the device has
3470 * disconnected, and all traffic has been stopped and the endpoints have been
3471 * disabled. Free any HC data structures associated with that device.
3472 */
3473void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3474{
3475 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3476 struct xhci_virt_device *virt_dev;
3477 unsigned long flags;
3478 u32 state;
3479 int i, ret;
3480
3481 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3482 /* If the host is halted due to driver unload, we still need to free the
3483 * device.
3484 */
3485 if (ret <= 0 && ret != -ENODEV)
3486 return;
3487
3488 virt_dev = xhci->devs[udev->slot_id];
3489
3490 /* Stop any wayward timer functions (which may grab the lock) */
3491 for (i = 0; i < 31; ++i) {
3492 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3493 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3494 }
3495
3496 if (udev->usb2_hw_lpm_enabled) {
3497 xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3498 udev->usb2_hw_lpm_enabled = 0;
3499 }
3500
3501 spin_lock_irqsave(&xhci->lock, flags);
3502 /* Don't disable the slot if the host controller is dead. */
3503 state = xhci_readl(xhci, &xhci->op_regs->status);
3504 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3505 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3506 xhci_free_virt_device(xhci, udev->slot_id);
3507 spin_unlock_irqrestore(&xhci->lock, flags);
3508 return;
3509 }
3510
3511 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
3512 spin_unlock_irqrestore(&xhci->lock, flags);
3513 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3514 return;
3515 }
3516 xhci_ring_cmd_db(xhci);
3517 spin_unlock_irqrestore(&xhci->lock, flags);
3518 /*
3519 * Event command completion handler will free any data structures
3520 * associated with the slot. XXX Can free sleep?
3521 */
3522}
3523
3524/*
3525 * Checks if we have enough host controller resources for the default control
3526 * endpoint.
3527 *
3528 * Must be called with xhci->lock held.
3529 */
3530static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3531{
3532 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3533 xhci_dbg(xhci, "Not enough ep ctxs: "
3534 "%u active, need to add 1, limit is %u.\n",
3535 xhci->num_active_eps, xhci->limit_active_eps);
3536 return -ENOMEM;
3537 }
3538 xhci->num_active_eps += 1;
3539 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3540 xhci->num_active_eps);
3541 return 0;
3542}
3543
3544
3545/*
3546 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3547 * timed out, or allocating memory failed. Returns 1 on success.
3548 */
3549int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3550{
3551 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3552 unsigned long flags;
3553 int timeleft;
3554 int ret;
3555 union xhci_trb *cmd_trb;
3556
3557 spin_lock_irqsave(&xhci->lock, flags);
3558 cmd_trb = xhci->cmd_ring->dequeue;
3559 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3560 if (ret) {
3561 spin_unlock_irqrestore(&xhci->lock, flags);
3562 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3563 return 0;
3564 }
3565 xhci_ring_cmd_db(xhci);
3566 spin_unlock_irqrestore(&xhci->lock, flags);
3567
3568 /* XXX: how much time for xHC slot assignment? */
3569 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3570 XHCI_CMD_DEFAULT_TIMEOUT);
3571 if (timeleft <= 0) {
3572 xhci_warn(xhci, "%s while waiting for a slot\n",
3573 timeleft == 0 ? "Timeout" : "Signal");
3574 /* cancel the enable slot request */
3575 return xhci_cancel_cmd(xhci, NULL, cmd_trb);
3576 }
3577
3578 if (!xhci->slot_id) {
3579 xhci_err(xhci, "Error while assigning device slot ID\n");
3580 return 0;
3581 }
3582
3583 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3584 spin_lock_irqsave(&xhci->lock, flags);
3585 ret = xhci_reserve_host_control_ep_resources(xhci);
3586 if (ret) {
3587 spin_unlock_irqrestore(&xhci->lock, flags);
3588 xhci_warn(xhci, "Not enough host resources, "
3589 "active endpoint contexts = %u\n",
3590 xhci->num_active_eps);
3591 goto disable_slot;
3592 }
3593 spin_unlock_irqrestore(&xhci->lock, flags);
3594 }
3595 /* Use GFP_NOIO, since this function can be called from
3596 * xhci_discover_or_reset_device(), which may be called as part of
3597 * mass storage driver error handling.
3598 */
3599 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
3600 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3601 goto disable_slot;
3602 }
3603 udev->slot_id = xhci->slot_id;
3604 /* Is this a LS or FS device under a HS hub? */
3605	/* Hub or peripheral? */
3606 return 1;
3607
3608disable_slot:
3609 /* Disable slot, if we can do it without mem alloc */
3610 spin_lock_irqsave(&xhci->lock, flags);
3611 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3612 xhci_ring_cmd_db(xhci);
3613 spin_unlock_irqrestore(&xhci->lock, flags);
3614 return 0;
3615}
3616
3617/*
3618 * Issue an Address Device command (which will issue a SetAddress request to
3619 * the device).
3620 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
3621 * we should only issue and wait on one address command at the same time.
3622 *
3623 * We add one to the device address issued by the hardware because the USB core
3624 * uses address 1 for the root hubs (even though they're not really devices).
3625 */
3626int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3627{
3628 unsigned long flags;
3629 int timeleft;
3630 struct xhci_virt_device *virt_dev;
3631 int ret = 0;
3632 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3633 struct xhci_slot_ctx *slot_ctx;
3634 struct xhci_input_control_ctx *ctrl_ctx;
3635 u64 temp_64;
3636 union xhci_trb *cmd_trb;
3637
3638 if (!udev->slot_id) {
3639 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3640 return -EINVAL;
3641 }
3642
3643 virt_dev = xhci->devs[udev->slot_id];
3644
3645 if (WARN_ON(!virt_dev)) {
3646 /*
3647	 * In a plug/unplug torture test with an NEC controller,
3648	 * a NULL pointer dereference was observed once (virt_dev was NULL).
3649 * Print useful debug rather than crash if it is observed again!
3650 */
3651 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3652 udev->slot_id);
3653 return -EINVAL;
3654 }
3655
3656 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3657 /*
3658 * If this is the first Set Address since device plug-in or
3659	 * virt_device reallocation after a resume with an xHCI power loss,
3660 * then set up the slot context.
3661 */
3662 if (!slot_ctx->dev_info)
3663 xhci_setup_addressable_virt_dev(xhci, udev);
3664 /* Otherwise, update the control endpoint ring enqueue pointer. */
3665 else
3666 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3667 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3668 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3669 ctrl_ctx->drop_flags = 0;
3670
3671 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3672 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3673
3674 spin_lock_irqsave(&xhci->lock, flags);
3675 cmd_trb = xhci->cmd_ring->dequeue;
3676 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3677 udev->slot_id);
3678 if (ret) {
3679 spin_unlock_irqrestore(&xhci->lock, flags);
3680 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3681 return ret;
3682 }
3683 xhci_ring_cmd_db(xhci);
3684 spin_unlock_irqrestore(&xhci->lock, flags);
3685
3686 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3687 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3688 XHCI_CMD_DEFAULT_TIMEOUT);
3689 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
3690 * the SetAddress() "recovery interval" required by USB and aborting the
3691	 * command on a timeout."
3692 */
3693 if (timeleft <= 0) {
3694 xhci_warn(xhci, "%s while waiting for address device command\n",
3695 timeleft == 0 ? "Timeout" : "Signal");
3696 /* cancel the address device command */
3697 ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
3698 if (ret < 0)
3699 return ret;
3700 return -ETIME;
3701 }
3702
3703 switch (virt_dev->cmd_status) {
3704 case COMP_CTX_STATE:
3705 case COMP_EBADSLT:
3706 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3707 udev->slot_id);
3708 ret = -EINVAL;
3709 break;
3710 case COMP_TX_ERR:
3711 dev_warn(&udev->dev, "Device not responding to set address.\n");
3712 ret = -EPROTO;
3713 break;
3714 case COMP_DEV_ERR:
3715 dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3716 "device command.\n");
3717 ret = -ENODEV;
3718 break;
3719 case COMP_SUCCESS:
3720 xhci_dbg(xhci, "Successful Address Device command\n");
3721 break;
3722 default:
3723 xhci_err(xhci, "ERROR: unexpected command completion "
3724 "code 0x%x.\n", virt_dev->cmd_status);
3725 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3726 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3727 ret = -EINVAL;
3728 break;
3729 }
3730 if (ret) {
3731 return ret;
3732 }
3733 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3734 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3735 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
3736 udev->slot_id,
3737 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3738 (unsigned long long)
3739 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3740 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
3741 (unsigned long long)virt_dev->out_ctx->dma);
3742 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3743 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3744 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3745 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3746 /*
3747 * USB core uses address 1 for the roothubs, so we add one to the
3748 * address given back to us by the HC.
3749 */
3750 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3751 /* Use kernel assigned address for devices; store xHC assigned
3752 * address locally. */
3753 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3754 + 1;
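	/*
	 * Example (illustrative): if the xHC assigned USB device address 1,
	 * virt_dev->address is recorded as 2, keeping it distinct from the
	 * kernel-assigned roothub address 1.
	 */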
3755 /* Zero the input context control for later use */
3756 ctrl_ctx->add_flags = 0;
3757 ctrl_ctx->drop_flags = 0;
3758
3759 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
3760
3761 return 0;
3762}
3763
3764#ifdef CONFIG_USB_SUSPEND
3765
3766/* BESL to HIRD Encoding array for USB2 LPM */
3767static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3768 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3769
3770/* Calculate HIRD/BESL for USB2 PORTPMSC */
3771static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
3772 struct usb_device *udev)
3773{
3774 int u2del, besl, besl_host;
3775 int besl_device = 0;
3776 u32 field;
3777
3778 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3779 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
3780
3781 if (field & USB_BESL_SUPPORT) {
3782 for (besl_host = 0; besl_host < 16; besl_host++) {
3783 if (xhci_besl_encoding[besl_host] >= u2del)
3784 break;
3785 }
3786 /* Use baseline BESL value as default */
3787 if (field & USB_BESL_BASELINE_VALID)
3788 besl_device = USB_GET_BESL_BASELINE(field);
3789 else if (field & USB_BESL_DEEP_VALID)
3790 besl_device = USB_GET_BESL_DEEP(field);
3791 } else {
3792 if (u2del <= 50)
3793 besl_host = 0;
3794 else
3795 besl_host = (u2del - 51) / 75 + 1;
3796 }
3797
3798 besl = besl_host + besl_device;
3799 if (besl > 15)
3800 besl = 15;
3801
3802 return besl;
3803}
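
/*
 * Worked example with made-up numbers: for u2del = 300 and a device that
 * sets USB_BESL_SUPPORT, the loop above stops at besl_host = 3, since
 * xhci_besl_encoding[3] = 300 is the first entry >= u2del.  A valid BESL
 * baseline of 2 then gives besl = 3 + 2 = 5.  Without BESL support, the
 * HIRD fallback computes besl_host = (300 - 51) / 75 + 1 = 4.
 */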
3804
3805static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3806 struct usb_device *udev)
3807{
3808 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3809 struct dev_info *dev_info;
3810 __le32 __iomem **port_array;
3811 __le32 __iomem *addr, *pm_addr;
3812 u32 temp, dev_id;
3813 unsigned int port_num;
3814 unsigned long flags;
3815 int hird;
3816 int ret;
3817
3818 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3819 !udev->lpm_capable)
3820 return -EINVAL;
3821
3822	/* So far, we only support LPM for non-hub devices connected to the root hub */
3823 if (!udev->parent || udev->parent->parent ||
3824 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3825 return -EINVAL;
3826
3827 spin_lock_irqsave(&xhci->lock, flags);
3828
3829 /* Look for devices in lpm_failed_devs list */
3830 dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3831 le16_to_cpu(udev->descriptor.idProduct);
3832 list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3833 if (dev_info->dev_id == dev_id) {
3834 ret = -EINVAL;
3835 goto finish;
3836 }
3837 }
3838
3839 port_array = xhci->usb2_ports;
3840 port_num = udev->portnum - 1;
3841
3842 if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3843 xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3844 ret = -EINVAL;
3845 goto finish;
3846 }
3847
3848 /*
3849 * Test USB 2.0 software LPM.
3850 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
3851 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
3852 * in the June 2011 errata release.
3853 */
3854 xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3855 /*
3856 * Set L1 Device Slot and HIRD/BESL.
3857 * Check device's USB 2.0 extension descriptor to determine whether
3858 * HIRD or BESL shoule be used. See USB2.0 LPM errata.
3859	 * HIRD or BESL should be used. See USB 2.0 LPM errata.
3860 pm_addr = port_array[port_num] + 1;
3861 hird = xhci_calculate_hird_besl(xhci, udev);
3862 temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3863 xhci_writel(xhci, temp, pm_addr);
3864
3865 /* Set port link state to U2(L1) */
3866 addr = port_array[port_num];
3867 xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3868
3869 /* wait for ACK */
3870 spin_unlock_irqrestore(&xhci->lock, flags);
3871 msleep(10);
3872 spin_lock_irqsave(&xhci->lock, flags);
3873
3874 /* Check L1 Status */
3875 ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3876 if (ret != -ETIMEDOUT) {
3877		/* entered L1 successfully */
3878 temp = xhci_readl(xhci, addr);
3879 xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3880 port_num, temp);
3881 ret = 0;
3882 } else {
3883 temp = xhci_readl(xhci, pm_addr);
3884 xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3885 port_num, temp & PORT_L1S_MASK);
3886 ret = -EINVAL;
3887 }
3888
3889 /* Resume the port */
3890 xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3891
3892 spin_unlock_irqrestore(&xhci->lock, flags);
3893 msleep(10);
3894 spin_lock_irqsave(&xhci->lock, flags);
3895
3896 /* Clear PLC */
3897 xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3898
3899 /* Check PORTSC to make sure the device is in the right state */
3900 if (!ret) {
3901 temp = xhci_readl(xhci, addr);
3902 xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
3903 if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3904 (temp & PORT_PLS_MASK) != XDEV_U0) {
3905 xhci_dbg(xhci, "port L1 resume fail\n");
3906 ret = -EINVAL;
3907 }
3908 }
3909
3910 if (ret) {
3911 /* Insert dev to lpm_failed_devs list */
3912 xhci_warn(xhci, "device LPM test failed, may disconnect and "
3913 "re-enumerate\n");
3914 dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3915 if (!dev_info) {
3916 ret = -ENOMEM;
3917 goto finish;
3918 }
3919 dev_info->dev_id = dev_id;
3920 INIT_LIST_HEAD(&dev_info->list);
3921 list_add(&dev_info->list, &xhci->lpm_failed_devs);
3922 } else {
3923 xhci_ring_device(xhci, udev->slot_id);
3924 }
3925
3926finish:
3927 spin_unlock_irqrestore(&xhci->lock, flags);
3928 return ret;
3929}
3930
3931int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3932 struct usb_device *udev, int enable)
3933{
3934 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3935 __le32 __iomem **port_array;
3936 __le32 __iomem *pm_addr;
3937 u32 temp;
3938 unsigned int port_num;
3939 unsigned long flags;
3940 int hird;
3941
3942 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3943 !udev->lpm_capable)
3944 return -EPERM;
3945
3946 if (!udev->parent || udev->parent->parent ||
3947 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3948 return -EPERM;
3949
3950 if (udev->usb2_hw_lpm_capable != 1)
3951 return -EPERM;
3952
3953 spin_lock_irqsave(&xhci->lock, flags);
3954
3955 port_array = xhci->usb2_ports;
3956 port_num = udev->portnum - 1;
3957 pm_addr = port_array[port_num] + 1;
3958 temp = xhci_readl(xhci, pm_addr);
3959
3960 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
3961 enable ? "enable" : "disable", port_num);
3962
3963 hird = xhci_calculate_hird_besl(xhci, udev);
3964
3965 if (enable) {
3966 temp &= ~PORT_HIRD_MASK;
3967 temp |= PORT_HIRD(hird) | PORT_RWE;
3968 xhci_writel(xhci, temp, pm_addr);
3969 temp = xhci_readl(xhci, pm_addr);
3970 temp |= PORT_HLE;
3971 xhci_writel(xhci, temp, pm_addr);
3972 } else {
3973 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
3974 xhci_writel(xhci, temp, pm_addr);
3975 }
3976
3977 spin_unlock_irqrestore(&xhci->lock, flags);
3978 return 0;
3979}
3980
3981int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
3982{
3983 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3984 int ret;
3985
3986 ret = xhci_usb2_software_lpm_test(hcd, udev);
3987 if (!ret) {
3988		xhci_dbg(xhci, "software LPM test succeeded\n");
3989 if (xhci->hw_lpm_support == 1) {
3990 udev->usb2_hw_lpm_capable = 1;
3991 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
3992 if (!ret)
3993 udev->usb2_hw_lpm_enabled = 1;
3994 }
3995 }
3996
3997 return 0;
3998}
3999
4000#else
4001
4002int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4003 struct usb_device *udev, int enable)
4004{
4005 return 0;
4006}
4007
4008int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4009{
4010 return 0;
4011}
4012
4013#endif /* CONFIG_USB_SUSPEND */
4014
4015/*---------------------- USB 3.0 Link PM functions ------------------------*/
4016
4017#ifdef CONFIG_PM
4018/* Service interval in nanoseconds = 2^(bInterval - 1) * 125 us * (1000 ns / 1 us) */
4019static unsigned long long xhci_service_interval_to_ns(
4020 struct usb_endpoint_descriptor *desc)
4021{
4022 return (1 << (desc->bInterval - 1)) * 125 * 1000;
4023}
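
/*
 * For example, an endpoint with bInterval = 4 has a service interval of
 * 2^3 * 125 us = 1 ms, so this returns 1000000 ns.
 */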
4024
4025static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4026 enum usb3_link_state state)
4027{
4028 unsigned long long sel;
4029 unsigned long long pel;
4030 unsigned int max_sel_pel;
4031 char *state_name;
4032
4033 switch (state) {
4034 case USB3_LPM_U1:
4035 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4036 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4037 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4038 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4039 state_name = "U1";
4040 break;
4041 case USB3_LPM_U2:
4042 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4043 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4044 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4045 state_name = "U2";
4046 break;
4047 default:
4048 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4049 __func__);
4050 return USB3_LPM_DISABLED;
4051 }
4052
4053 if (sel <= max_sel_pel && pel <= max_sel_pel)
4054 return USB3_LPM_DEVICE_INITIATED;
4055
4056 if (sel > max_sel_pel)
4057 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4058				"due to long SEL %llu us\n",
4059 state_name, sel);
4060 else
4061 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4062				"due to long PEL %llu us\n",
4063 state_name, pel);
4064 return USB3_LPM_DISABLED;
4065}
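
/*
 * Rounding example (hypothetical device): u1_params.sel = 2500 ns becomes
 * sel = DIV_ROUND_UP(2500, 1000) = 3 us, so the comparisons against the
 * per-state cap above are done in whole microseconds.
 */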
4066
4067/* Returns the hub-encoded U1 timeout value.
4068 * The U1 timeout should be the maximum of the following values:
4069 * - For control endpoints, U1 system exit latency (SEL) * 3
4070 * - For bulk endpoints, U1 SEL * 5
4071 * - For interrupt endpoints:
4072 * - Notification EPs, U1 SEL * 3
4073 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4074 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4075 */
4076static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
4077 struct usb_endpoint_descriptor *desc)
4078{
4079 unsigned long long timeout_ns;
4080 int ep_type;
4081 int intr_type;
4082
4083 ep_type = usb_endpoint_type(desc);
4084 switch (ep_type) {
4085 case USB_ENDPOINT_XFER_CONTROL:
4086 timeout_ns = udev->u1_params.sel * 3;
4087 break;
4088 case USB_ENDPOINT_XFER_BULK:
4089 timeout_ns = udev->u1_params.sel * 5;
4090 break;
4091 case USB_ENDPOINT_XFER_INT:
4092 intr_type = usb_endpoint_interrupt_type(desc);
4093 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4094 timeout_ns = udev->u1_params.sel * 3;
4095 break;
4096 }
4097 /* Otherwise the calculation is the same as isoc eps */
4098 case USB_ENDPOINT_XFER_ISOC:
4099 timeout_ns = xhci_service_interval_to_ns(desc);
4100 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4101 if (timeout_ns < udev->u1_params.sel * 2)
4102 timeout_ns = udev->u1_params.sel * 2;
4103 break;
4104 default:
4105 return 0;
4106 }
4107
4108 /* The U1 timeout is encoded in 1us intervals. */
4109 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4110 /* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
4111 if (timeout_ns == USB3_LPM_DISABLED)
4112 timeout_ns++;
4113
4114 /* If the necessary timeout value is bigger than what we can set in the
4115 * USB 3.0 hub, we have to disable hub-initiated U1.
4116 */
4117 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4118 return timeout_ns;
4119 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4120			"due to long timeout %llu us\n", timeout_ns);
4121 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4122}
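
/*
 * Worked example with hypothetical latencies: a bulk endpoint on a device
 * with a U1 SEL of 1000 ns gets timeout_ns = 5 * 1000 = 5000 ns, which
 * encodes to DIV_ROUND_UP(5000, 1000) = 5, i.e. a 5 us U1 timeout.  A
 * value too large for the hub's U1 timeout field falls back to
 * xhci_get_timeout_no_hub_lpm() instead.
 */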
4123
4124/* Returns the hub-encoded U2 timeout value.
4125 * The U2 timeout should be the maximum of:
4126 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4127 * - largest bInterval of any active periodic endpoint (to avoid going
4128 * into lower power link states between intervals).
4129 * - the U2 Exit Latency of the device
4130 */
4131static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
4132 struct usb_endpoint_descriptor *desc)
4133{
4134 unsigned long long timeout_ns;
4135 unsigned long long u2_del_ns;
4136
4137 timeout_ns = 10 * 1000 * 1000;
4138
4139 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4140 (xhci_service_interval_to_ns(desc) > timeout_ns))
4141 timeout_ns = xhci_service_interval_to_ns(desc);
4142
4143 u2_del_ns = udev->bos->ss_cap->bU2DevExitLat * 1000;
4144 if (u2_del_ns > timeout_ns)
4145 timeout_ns = u2_del_ns;
4146
4147 /* The U2 timeout is encoded in 256us intervals */
4148 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4149 /* If the necessary timeout value is bigger than what we can set in the
4150 * USB 3.0 hub, we have to disable hub-initiated U2.
4151 */
4152 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4153 return timeout_ns;
4154 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4155			"due to long timeout %llu (256 us units)\n", timeout_ns);
4156 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4157}
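
/*
 * Worked example with hypothetical numbers: with no periodic endpoint
 * slower than 10 ms and bU2DevExitLat = 2000 us (u2_del_ns = 2000000),
 * the 10 ms floor wins and the hub-encoded value is
 * DIV_ROUND_UP(10000000, 256 * 1000) = 40, i.e. 40 * 256 us ~= 10.2 ms.
 */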
4158
4159static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4160 struct usb_device *udev,
4161 struct usb_endpoint_descriptor *desc,
4162 enum usb3_link_state state,
4163 u16 *timeout)
4164{
4165 if (state == USB3_LPM_U1) {
4166 if (xhci->quirks & XHCI_INTEL_HOST)
4167 return xhci_calculate_intel_u1_timeout(udev, desc);
4168 } else {
4169 if (xhci->quirks & XHCI_INTEL_HOST)
4170 return xhci_calculate_intel_u2_timeout(udev, desc);
4171 }
4172
4173 return USB3_LPM_DISABLED;
4174}
4175
4176static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4177 struct usb_device *udev,
4178 struct usb_endpoint_descriptor *desc,
4179 enum usb3_link_state state,
4180 u16 *timeout)
4181{
4182 u16 alt_timeout;
4183
4184 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4185 desc, state, timeout);
4186
4187 /* If we found we can't enable hub-initiated LPM, or
4188 * the U1 or U2 exit latency was too high to allow
4189 * device-initiated LPM as well, just stop searching.
4190 */
4191 if (alt_timeout == USB3_LPM_DISABLED ||
4192 alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4193 *timeout = alt_timeout;
4194 return -E2BIG;
4195 }
4196 if (alt_timeout > *timeout)
4197 *timeout = alt_timeout;
4198 return 0;
4199}
4200
4201static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4202 struct usb_device *udev,
4203 struct usb_host_interface *alt,
4204 enum usb3_link_state state,
4205 u16 *timeout)
4206{
4207 int j;
4208
4209 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4210 if (xhci_update_timeout_for_endpoint(xhci, udev,
4211 &alt->endpoint[j].desc, state, timeout))
4212 return -E2BIG;
4214 }
4215 return 0;
4216}
4217
4218static int xhci_check_intel_tier_policy(struct usb_device *udev,
4219 enum usb3_link_state state)
4220{
4221 struct usb_device *parent;
4222 unsigned int num_hubs;
4223
4224 if (state == USB3_LPM_U2)
4225 return 0;
4226
4227 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4228 for (parent = udev->parent, num_hubs = 0; parent->parent;
4229 parent = parent->parent)
4230 num_hubs++;
4231
4232 if (num_hubs < 2)
4233 return 0;
4234
4235 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4236 " below second-tier hub.\n");
4237 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4238 "to decrease power consumption.\n");
4239 return -E2BIG;
4240}
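
/*
 * Counting example: a device directly below the root hub walks zero loop
 * iterations (num_hubs = 0), and one external hub gives num_hubs = 1;
 * both still permit U1.  Two or more chained hubs disable it.
 */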
4241
4242static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4243 struct usb_device *udev,
4244 enum usb3_link_state state)
4245{
4246 if (xhci->quirks & XHCI_INTEL_HOST)
4247 return xhci_check_intel_tier_policy(udev, state);
4248 return -EINVAL;
4249}
4250
4251/* Returns the U1 or U2 timeout that should be enabled.
4252 * If the tier check or timeout setting functions return with a non-zero exit
4253 * code, that means the timeout value has been finalized and we shouldn't look
4254 * at any more endpoints.
4255 */
4256static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4257 struct usb_device *udev, enum usb3_link_state state)
4258{
4259 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4260 struct usb_host_config *config;
4261 char *state_name;
4262 int i;
4263 u16 timeout = USB3_LPM_DISABLED;
4264
4265 if (state == USB3_LPM_U1)
4266 state_name = "U1";
4267 else if (state == USB3_LPM_U2)
4268 state_name = "U2";
4269 else {
4270 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4271 state);
4272 return timeout;
4273 }
4274
4275 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4276 return timeout;
4277
4278 /* Gather some information about the currently installed configuration
4279 * and alternate interface settings.
4280 */
4281 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4282 state, &timeout))
4283 return timeout;
4284
4285 config = udev->actconfig;
4286 if (!config)
4287 return timeout;
4288
4289 for (i = 0; i < USB_MAXINTERFACES; i++) {
4290 struct usb_driver *driver;
4291 struct usb_interface *intf = config->interface[i];
4292
4293 if (!intf)
4294 continue;
4295
4296 /* Check if any currently bound drivers want hub-initiated LPM
4297 * disabled.
4298 */
4299 if (intf->dev.driver) {
4300 driver = to_usb_driver(intf->dev.driver);
4301 if (driver && driver->disable_hub_initiated_lpm) {
4302 dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4303 "at request of driver %s\n",
4304 state_name, driver->name);
4305 return xhci_get_timeout_no_hub_lpm(udev, state);
4306 }
4307 }
4308
4309 /* Not sure how this could happen... */
4310 if (!intf->cur_altsetting)
4311 continue;
4312
4313 if (xhci_update_timeout_for_interface(xhci, udev,
4314 intf->cur_altsetting,
4315 state, &timeout))
4316 return timeout;
4317 }
4318 return timeout;
4319}
4320
4321/*
4322 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4323 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4324 */
4325static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4326 struct usb_device *udev, u16 max_exit_latency)
4327{
4328 struct xhci_virt_device *virt_dev;
4329 struct xhci_command *command;
4330 struct xhci_input_control_ctx *ctrl_ctx;
4331 struct xhci_slot_ctx *slot_ctx;
4332 unsigned long flags;
4333 int ret;
4334
4335 spin_lock_irqsave(&xhci->lock, flags);
4336 if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
4337 spin_unlock_irqrestore(&xhci->lock, flags);
4338 return 0;
4339 }
4340
4341 /* Attempt to issue an Evaluate Context command to change the MEL. */
4342 virt_dev = xhci->devs[udev->slot_id];
4343 command = xhci->lpm_command;
4344 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4345 spin_unlock_irqrestore(&xhci->lock, flags);
4346
4347 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
4348 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4349 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4350 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4351 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4352
4353 xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
4354 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4355 xhci_dbg_ctx(xhci, command->in_ctx, 0);
4356
4357 /* Issue and wait for the evaluate context command. */
4358 ret = xhci_configure_endpoint(xhci, udev, command,
4359 true, true);
4360 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4361 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4362
4363 if (!ret) {
4364 spin_lock_irqsave(&xhci->lock, flags);
4365 virt_dev->current_mel = max_exit_latency;
4366 spin_unlock_irqrestore(&xhci->lock, flags);
4367 }
4368 return ret;
4369}
4370
4371static int calculate_max_exit_latency(struct usb_device *udev,
4372 enum usb3_link_state state_changed,
4373 u16 hub_encoded_timeout)
4374{
4375 unsigned long long u1_mel_us = 0;
4376 unsigned long long u2_mel_us = 0;
4377 unsigned long long mel_us = 0;
4378 bool disabling_u1;
4379 bool disabling_u2;
4380 bool enabling_u1;
4381 bool enabling_u2;
4382
4383 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4384 hub_encoded_timeout == USB3_LPM_DISABLED);
4385 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4386 hub_encoded_timeout == USB3_LPM_DISABLED);
4387
4388 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4389 hub_encoded_timeout != USB3_LPM_DISABLED);
4390 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4391 hub_encoded_timeout != USB3_LPM_DISABLED);
4392
4393 /* If U1 was already enabled and we're not disabling it,
4394 * or we're going to enable U1, account for the U1 max exit latency.
4395 */
4396 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4397 enabling_u1)
4398 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4399 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4400 enabling_u2)
4401 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4402
4403 if (u1_mel_us > u2_mel_us)
4404 mel_us = u1_mel_us;
4405 else
4406 mel_us = u2_mel_us;
4407 /* xHCI host controller max exit latency field is only 16 bits wide. */
4408 if (mel_us > MAX_EXIT) {
4409 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4410 "is too big.\n", mel_us);
4411 return -E2BIG;
4412 }
4413 return mel_us;
4414}
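
/*
 * Example with made-up latencies: enabling U2 while U1 is already enabled,
 * with u1_params.mel = 1000 ns and u2_params.mel = 2000 ns, gives
 * u1_mel_us = 1 and u2_mel_us = 2, so 2 us is returned as the new maximum
 * exit latency.
 */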
4415
4416/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4417int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4418 struct usb_device *udev, enum usb3_link_state state)
4419{
4420 struct xhci_hcd *xhci;
4421 u16 hub_encoded_timeout;
4422 int mel;
4423 int ret;
4424
4425 xhci = hcd_to_xhci(hcd);
4426 /* The LPM timeout values are pretty host-controller specific, so don't
4427 * enable hub-initiated timeouts unless the vendor has provided
4428 * information about their timeout algorithm.
4429 */
4430 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4431 !xhci->devs[udev->slot_id])
4432 return USB3_LPM_DISABLED;
4433
4434 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4435 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4436 if (mel < 0) {
4437 /* Max Exit Latency is too big, disable LPM. */
4438 hub_encoded_timeout = USB3_LPM_DISABLED;
4439 mel = 0;
4440 }
4441
4442 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4443 if (ret)
4444 return ret;
4445 return hub_encoded_timeout;
4446}
4447
4448int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4449 struct usb_device *udev, enum usb3_link_state state)
4450{
4451 struct xhci_hcd *xhci;
4452 u16 mel;
4453 int ret;
4454
4455 xhci = hcd_to_xhci(hcd);
4456 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4457 !xhci->devs[udev->slot_id])
4458 return 0;
4459
4460 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4461 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4462 if (ret)
4463 return ret;
4464 return 0;
4465}
4466#else /* CONFIG_PM */
4467
4468int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4469 struct usb_device *udev, enum usb3_link_state state)
4470{
4471 return USB3_LPM_DISABLED;
4472}
4473
4474int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4475 struct usb_device *udev, enum usb3_link_state state)
4476{
4477 return 0;
4478}
4479#endif /* CONFIG_PM */
4480
4481/*-------------------------------------------------------------------------*/
4482
4483/* Once a hub descriptor is fetched for a device, we need to update the xHC's
4484 * internal data structures for the device.
4485 */
4486int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4487 struct usb_tt *tt, gfp_t mem_flags)
4488{
4489 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4490 struct xhci_virt_device *vdev;
4491 struct xhci_command *config_cmd;
4492 struct xhci_input_control_ctx *ctrl_ctx;
4493 struct xhci_slot_ctx *slot_ctx;
4494 unsigned long flags;
4495 unsigned think_time;
4496 int ret;
4497
4498 /* Ignore root hubs */
4499 if (!hdev->parent)
4500 return 0;
4501
4502 vdev = xhci->devs[hdev->slot_id];
4503 if (!vdev) {
4504 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4505 return -EINVAL;
4506 }
4507 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4508 if (!config_cmd) {
4509 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4510 return -ENOMEM;
4511 }
4512
4513 spin_lock_irqsave(&xhci->lock, flags);
4514 if (hdev->speed == USB_SPEED_HIGH &&
4515 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4516 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4517 xhci_free_command(xhci, config_cmd);
4518 spin_unlock_irqrestore(&xhci->lock, flags);
4519 return -ENOMEM;
4520 }
4521
4522 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4523 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
4524 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4525 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4526 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4527 if (tt->multi)
4528 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4529 if (xhci->hci_version > 0x95) {
4530 xhci_dbg(xhci, "xHCI version %x needs hub "
4531 "TT think time and number of ports\n",
4532 (unsigned int) xhci->hci_version);
4533 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4534 /* Set TT think time - convert from ns to FS bit times.
4535 * 0 = 8 FS bit times, 1 = 16 FS bit times,
4536 * 2 = 24 FS bit times, 3 = 32 FS bit times.
4537 *
4538 * xHCI 1.0: this field shall be 0 if the device is not a
4539		 * High-speed hub.
4540 */
4541 think_time = tt->think_time;
4542 if (think_time != 0)
4543 think_time = (think_time / 666) - 1;
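		/*
		 * e.g. a think time of 666 ns encodes to 0 (8 FS bit times)
		 * and 2664 ns to (2664 / 666) - 1 = 3 (32 FS bit times).
		 */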
4544 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4545 slot_ctx->tt_info |=
4546 cpu_to_le32(TT_THINK_TIME(think_time));
4547 } else {
4548 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4549 "TT think time or number of ports\n",
4550 (unsigned int) xhci->hci_version);
4551 }
4552 slot_ctx->dev_state = 0;
4553 spin_unlock_irqrestore(&xhci->lock, flags);
4554
4555 xhci_dbg(xhci, "Set up %s for hub device.\n",
4556 (xhci->hci_version > 0x95) ?
4557 "configure endpoint" : "evaluate context");
4558 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4559 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4560
4561 /* Issue and wait for the configure endpoint or
4562 * evaluate context command.
4563 */
4564 if (xhci->hci_version > 0x95)
4565 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4566 false, false);
4567 else
4568 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4569 true, false);
4570
4571 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4572 xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4573
4574 xhci_free_command(xhci, config_cmd);
4575 return ret;
4576}
4577
4578int xhci_get_frame(struct usb_hcd *hcd)
4579{
4580 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4581 /* EHCI mods by the periodic size. Why? */
4582 return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
4583}
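
/*
 * MFINDEX counts 125 us microframes, so shifting right by three converts
 * it to 1 ms frames; e.g. a microframe index of 80 is frame 10.
 */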
4584
4585int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4586{
4587 struct xhci_hcd *xhci;
4588 struct device *dev = hcd->self.controller;
4589 int retval;
4590 u32 temp;
4591
4592 /* Accept arbitrarily long scatter-gather lists */
4593 hcd->self.sg_tablesize = ~0;
4594
4595 if (usb_hcd_is_primary_hcd(hcd)) {
4596 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
4597 if (!xhci)
4598 return -ENOMEM;
4599 *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
4600 xhci->main_hcd = hcd;
4601 /* Mark the first roothub as being USB 2.0.
4602 * The xHCI driver will register the USB 3.0 roothub.
4603 */
4604 hcd->speed = HCD_USB2;
4605 hcd->self.root_hub->speed = USB_SPEED_HIGH;
4606 /*
4607		 * The USB 2.0 roothub under xHCI has an integrated TT
4608		 * (rate matching hub), as opposed to having an OHCI/UHCI
4609 * companion controller.
4610 */
4611 hcd->has_tt = 1;
4612 } else {
4613 /* xHCI private pointer was set in xhci_pci_probe for the second
4614 * registered roothub.
4615 */
4616 xhci = hcd_to_xhci(hcd);
4617 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4618 if (HCC_64BIT_ADDR(temp)) {
4619 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4620 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4621 } else {
4622 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4623 }
4624 return 0;
4625 }
4626
4627 xhci->cap_regs = hcd->regs;
4628 xhci->op_regs = hcd->regs +
4629 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
4630 xhci->run_regs = hcd->regs +
4631 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4632 /* Cache read-only capability registers */
4633 xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
4634 xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
4635 xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
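	/*
	 * hcc_params is used as scratch here: hc_capbase is read first so
	 * that HC_VERSION() can extract the interface version, then the
	 * real HCCPARAMS value overwrites it below.
	 */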
4636 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
4637 xhci->hci_version = HC_VERSION(xhci->hcc_params);
4638 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4639 xhci_print_registers(xhci);
4640
4641 get_quirks(dev, xhci);
4642
4643 /* Make sure the HC is halted. */
4644 retval = xhci_halt(xhci);
4645 if (retval)
4646 goto error;
4647
4648 xhci_dbg(xhci, "Resetting HCD\n");
4649 /* Reset the internal HC memory state and registers. */
4650 retval = xhci_reset(xhci);
4651 if (retval)
4652 goto error;
4653 xhci_dbg(xhci, "Reset complete\n");
4654
4655 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4656 if (HCC_64BIT_ADDR(temp)) {
4657 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4658 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4659 } else {
4660 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4661 }
4662
4663 xhci_dbg(xhci, "Calling HCD init\n");
4664 /* Initialize HCD and host controller data structures. */
4665 retval = xhci_init(hcd);
4666 if (retval)
4667 goto error;
4668 xhci_dbg(xhci, "Called HCD init\n");
4669 return 0;
4670error:
4671 kfree(xhci);
4672 return retval;
4673}
4674
4675MODULE_DESCRIPTION(DRIVER_DESC);
4676MODULE_AUTHOR(DRIVER_AUTHOR);
4677MODULE_LICENSE("GPL");
4678
4679static int __init xhci_hcd_init(void)
4680{
4681 int retval;
4682
4683 retval = xhci_register_pci();
4684 if (retval < 0) {
4685		printk(KERN_DEBUG "Problem registering PCI driver.\n");
4686 return retval;
4687 }
4688 retval = xhci_register_plat();
4689 if (retval < 0) {
4690		printk(KERN_DEBUG "Problem registering platform driver.\n");
4691 goto unreg_pci;
4692 }
4693 /*
4694 * Check the compiler generated sizes of structures that must be laid
4695 * out in specific ways for hardware access.
4696 */
4697 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
4698 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
4699 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
4700 /* xhci_device_control has eight fields, and also
4701 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
4702 */
4703 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
4704 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
4705 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
4706 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
4707 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
4708 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
4709 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
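	/*
	 * The expressions above expand to fixed byte sizes, e.g. the doorbell
	 * array must be 256 * 32 / 8 = 1024 bytes and the run registers
	 * (8 + 8 * 128) * 32 / 8 = 4128 bytes.
	 */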
4710 return 0;
4711unreg_pci:
4712 xhci_unregister_pci();
4713 return retval;
4714}
4715module_init(xhci_hcd_init);
4716
4717static void __exit xhci_hcd_cleanup(void)
4718{
4719 xhci_unregister_pci();
4720 xhci_unregister_plat();
4721}
4722module_exit(xhci_hcd_cleanup);
1/*
2 * xHCI host controller driver
3 *
4 * Copyright (C) 2008 Intel Corp.
5 *
6 * Author: Sarah Sharp
7 * Some code borrowed from the Linux EHCI driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software Foundation,
20 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 */
22
23#include <linux/pci.h>
24#include <linux/irq.h>
25#include <linux/log2.h>
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/slab.h>
29#include <linux/dmi.h>
30#include <linux/dma-mapping.h>
31
32#include "xhci.h"
33#include "xhci-trace.h"
34
35#define DRIVER_AUTHOR "Sarah Sharp"
36#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
37
38/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
39static int link_quirk;
40module_param(link_quirk, int, S_IRUGO | S_IWUSR);
41MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
42
43static unsigned int quirks;
44module_param(quirks, uint, S_IRUGO);
45MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
46
47/* TODO: copied from ehci-hcd.c - can this be refactored? */
48/*
49 * xhci_handshake - spin reading hc until handshake completes or fails
50 * @ptr: address of hc register to be read
51 * @mask: bits to look at in result of read
52 * @done: value of those bits when handshake succeeds
53 * @usec: timeout in microseconds
54 *
55 * Returns negative errno, or zero on success
56 *
57 * Success happens when the "mask" bits have the specified value (hardware
58 * handshake done). There are two failure modes: "usec" have passed (major
59 * hardware flakeout), or the register reads as all-ones (hardware removed).
60 */
61int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
62 u32 mask, u32 done, int usec)
63{
64 u32 result;
65
66 do {
67 result = readl(ptr);
68 if (result == ~(u32)0) /* card removed */
69 return -ENODEV;
70 result &= mask;
71 if (result == done)
72 return 0;
73 udelay(1);
74 usec--;
75 } while (usec > 0);
76 return -ETIMEDOUT;
77}
78
79/*
80 * Disable interrupts and begin the xHCI halting process.
81 */
82void xhci_quiesce(struct xhci_hcd *xhci)
83{
84 u32 halted;
85 u32 cmd;
86 u32 mask;
87
88 mask = ~(XHCI_IRQS);
89 halted = readl(&xhci->op_regs->status) & STS_HALT;
90 if (!halted)
91 mask &= ~CMD_RUN;
92
93 cmd = readl(&xhci->op_regs->command);
94 cmd &= mask;
95 writel(cmd, &xhci->op_regs->command);
96}
97
98/*
99 * Force HC into halt state.
100 *
101 * Disable any IRQs and clear the run/stop bit.
102 * HC will complete any current and actively pipelined transactions, and
103 * should halt within 16 ms of the run/stop bit being cleared.
104 * Read HC Halted bit in the status register to see when the HC is finished.
105 */
106int xhci_halt(struct xhci_hcd *xhci)
107{
108 int ret;
109 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
110 xhci_quiesce(xhci);
111
112 ret = xhci_handshake(xhci, &xhci->op_regs->status,
113 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
114 if (!ret) {
115 xhci->xhc_state |= XHCI_STATE_HALTED;
116 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
117 } else
118 xhci_warn(xhci, "Host not halted after %u microseconds.\n",
119 XHCI_MAX_HALT_USEC);
120 return ret;
121}
122
123/*
124 * Set the run bit and wait for the host to be running.
125 */
126static int xhci_start(struct xhci_hcd *xhci)
127{
128 u32 temp;
129 int ret;
130
131 temp = readl(&xhci->op_regs->command);
132 temp |= (CMD_RUN);
133 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
134 temp);
135 writel(temp, &xhci->op_regs->command);
136
137 /*
138 * Wait for the HCHalted Status bit to be 0 to indicate the host is
139 * running.
140 */
141 ret = xhci_handshake(xhci, &xhci->op_regs->status,
142 STS_HALT, 0, XHCI_MAX_HALT_USEC);
143 if (ret == -ETIMEDOUT)
144 xhci_err(xhci, "Host took too long to start, "
145 "waited %u microseconds.\n",
146 XHCI_MAX_HALT_USEC);
147 if (!ret)
148 xhci->xhc_state &= ~XHCI_STATE_HALTED;
149 return ret;
150}
151
152/*
153 * Reset a halted HC.
154 *
155 * This resets pipelines, timers, counters, state machines, etc.
156 * Transactions will be terminated immediately, and operational registers
157 * will be set to their defaults.
158 */
159int xhci_reset(struct xhci_hcd *xhci)
160{
161 u32 command;
162 u32 state;
163 int ret, i;
164
165 state = readl(&xhci->op_regs->status);
166 if ((state & STS_HALT) == 0) {
167 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
168 return 0;
169 }
170
171 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
172 command = readl(&xhci->op_regs->command);
173 command |= CMD_RESET;
174 writel(command, &xhci->op_regs->command);
175
176 ret = xhci_handshake(xhci, &xhci->op_regs->command,
177 CMD_RESET, 0, 10 * 1000 * 1000);
178 if (ret)
179 return ret;
180
181 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
182 "Wait for controller to be ready for doorbell rings");
183 /*
184 * xHCI cannot write to any doorbells or operational registers other
185 * than status until the "Controller Not Ready" flag is cleared.
186 */
187 ret = xhci_handshake(xhci, &xhci->op_regs->status,
188 STS_CNR, 0, 10 * 1000 * 1000);
189
190 for (i = 0; i < 2; ++i) {
191 xhci->bus_state[i].port_c_suspend = 0;
192 xhci->bus_state[i].suspended_ports = 0;
193 xhci->bus_state[i].resuming_ports = 0;
194 }
195
196 return ret;
197}
198
199#ifdef CONFIG_PCI
200static int xhci_free_msi(struct xhci_hcd *xhci)
201{
202 int i;
203
204 if (!xhci->msix_entries)
205 return -EINVAL;
206
207 for (i = 0; i < xhci->msix_count; i++)
208 if (xhci->msix_entries[i].vector)
209 free_irq(xhci->msix_entries[i].vector,
210 xhci_to_hcd(xhci));
211 return 0;
212}
213
214/*
215 * Set up MSI
216 */
217static int xhci_setup_msi(struct xhci_hcd *xhci)
218{
219 int ret;
220 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
221
222 ret = pci_enable_msi(pdev);
223 if (ret) {
224 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
225 "failed to allocate MSI entry");
226 return ret;
227 }
228
229 ret = request_irq(pdev->irq, xhci_msi_irq,
230 0, "xhci_hcd", xhci_to_hcd(xhci));
231 if (ret) {
232 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
233 "disable MSI interrupt");
234 pci_disable_msi(pdev);
235 }
236
237 return ret;
238}
239
240/*
241 * Free IRQs
242 * free all IRQs request
243 */
244static void xhci_free_irq(struct xhci_hcd *xhci)
245{
246 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
247 int ret;
248
249 /* return if using legacy interrupt */
250 if (xhci_to_hcd(xhci)->irq > 0)
251 return;
252
253 ret = xhci_free_msi(xhci);
254 if (!ret)
255 return;
256 if (pdev->irq > 0)
257 free_irq(pdev->irq, xhci_to_hcd(xhci));
258
259 return;
260}
261
262/*
263 * Set up MSI-X
264 */
265static int xhci_setup_msix(struct xhci_hcd *xhci)
266{
267 int i, ret = 0;
268 struct usb_hcd *hcd = xhci_to_hcd(xhci);
269 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
270
271 /*
272 * calculate number of msi-x vectors supported.
273 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
274 * with max number of interrupters based on the xhci HCSPARAMS1.
275 * - num_online_cpus: maximum msi-x vectors per CPUs core.
276 * Add additional 1 vector to ensure always available interrupt.
277 */
278 xhci->msix_count = min(num_online_cpus() + 1,
279 HCS_MAX_INTRS(xhci->hcs_params1));
280
281 xhci->msix_entries =
282 kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
283 GFP_KERNEL);
284 if (!xhci->msix_entries) {
285 xhci_err(xhci, "Failed to allocate MSI-X entries\n");
286 return -ENOMEM;
287 }
288
289 for (i = 0; i < xhci->msix_count; i++) {
290 xhci->msix_entries[i].entry = i;
291 xhci->msix_entries[i].vector = 0;
292 }
293
294 ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
295 if (ret) {
296 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
297 "Failed to enable MSI-X");
298 goto free_entries;
299 }
300
301 for (i = 0; i < xhci->msix_count; i++) {
302 ret = request_irq(xhci->msix_entries[i].vector,
303 xhci_msi_irq,
304 0, "xhci_hcd", xhci_to_hcd(xhci));
305 if (ret)
306 goto disable_msix;
307 }
308
309 hcd->msix_enabled = 1;
310 return ret;
311
312disable_msix:
313 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
314 xhci_free_irq(xhci);
315 pci_disable_msix(pdev);
316free_entries:
317 kfree(xhci->msix_entries);
318 xhci->msix_entries = NULL;
319 return ret;
320}
321
322/* Free any IRQs and disable MSI-X */
323static void xhci_cleanup_msix(struct xhci_hcd *xhci)
324{
325 struct usb_hcd *hcd = xhci_to_hcd(xhci);
326 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
327
328 if (xhci->quirks & XHCI_PLAT)
329 return;
330
331 xhci_free_irq(xhci);
332
333 if (xhci->msix_entries) {
334 pci_disable_msix(pdev);
335 kfree(xhci->msix_entries);
336 xhci->msix_entries = NULL;
337 } else {
338 pci_disable_msi(pdev);
339 }
340
341 hcd->msix_enabled = 0;
342 return;
343}
344
345static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
346{
347 int i;
348
349 if (xhci->msix_entries) {
350 for (i = 0; i < xhci->msix_count; i++)
351 synchronize_irq(xhci->msix_entries[i].vector);
352 }
353}
354
355static int xhci_try_enable_msi(struct usb_hcd *hcd)
356{
357 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
358 struct pci_dev *pdev;
359 int ret;
360
361 /* The xhci platform device has set up IRQs through usb_add_hcd. */
362 if (xhci->quirks & XHCI_PLAT)
363 return 0;
364
365 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
366 /*
367 * Some Fresco Logic host controllers advertise MSI, but fail to
368 * generate interrupts. Don't even try to enable MSI.
369 */
370 if (xhci->quirks & XHCI_BROKEN_MSI)
371 goto legacy_irq;
372
373 /* unregister the legacy interrupt */
374 if (hcd->irq)
375 free_irq(hcd->irq, hcd);
376 hcd->irq = 0;
377
378 ret = xhci_setup_msix(xhci);
379 if (ret)
380 /* fall back to msi*/
381 ret = xhci_setup_msi(xhci);
382
383 if (!ret)
384 /* hcd->irq is 0, we have MSI */
385 return 0;
386
387 if (!pdev->irq) {
388 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
389 return -EINVAL;
390 }
391
392 legacy_irq:
393 if (!strlen(hcd->irq_descr))
394 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
395 hcd->driver->description, hcd->self.busnum);
396
397 /* fall back to legacy interrupt*/
398 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
399 hcd->irq_descr, hcd);
400 if (ret) {
401 xhci_err(xhci, "request interrupt %d failed\n",
402 pdev->irq);
403 return ret;
404 }
405 hcd->irq = pdev->irq;
406 return 0;
407}
408
409#else
410
411static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
412{
413 return 0;
414}
415
416static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
417{
418}
419
420static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
421{
422}
423
424#endif
425
426static void compliance_mode_recovery(unsigned long arg)
427{
428 struct xhci_hcd *xhci;
429 struct usb_hcd *hcd;
430 u32 temp;
431 int i;
432
433 xhci = (struct xhci_hcd *)arg;
434
435 for (i = 0; i < xhci->num_usb3_ports; i++) {
436 temp = readl(xhci->usb3_ports[i]);
437 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
438 /*
439 * Compliance Mode Detected. Letting USB Core
440 * handle the Warm Reset
441 */
442 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
443 "Compliance mode detected->port %d",
444 i + 1);
445 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
446 "Attempting compliance mode recovery");
447 hcd = xhci->shared_hcd;
448
449 if (hcd->state == HC_STATE_SUSPENDED)
450 usb_hcd_resume_root_hub(hcd);
451
452 usb_hcd_poll_rh_status(hcd);
453 }
454 }
455
456 if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
457 mod_timer(&xhci->comp_mode_recovery_timer,
458 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
459}
460
461/*
462 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
463 * which sometimes makes ports behind that hardware enter compliance mode.
464 * The quirk creates a timer that polls the link state of each host
465 * controller port every 2 seconds, and recovers a port by issuing a Warm
466 * Reset if compliance mode is detected; otherwise the port becomes "dead"
467 * (no device connections or disconnections will be detected anymore). Because
468 * no port status event is generated when entering compliance mode (per the
469 * xHCI spec), this quirk is needed on systems with the failing hardware.
470 */
471static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
472{
473 xhci->port_status_u0 = 0;
474 init_timer(&xhci->comp_mode_recovery_timer);
475
476 xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
477 xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
478 xhci->comp_mode_recovery_timer.expires = jiffies +
479 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
480
481 set_timer_slack(&xhci->comp_mode_recovery_timer,
482 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
483 add_timer(&xhci->comp_mode_recovery_timer);
484 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
485 "Compliance mode recovery timer initialized");
486}
487
488/*
489 * This function identifies the systems that have installed the SN65LVPE502CP
490 * USB3.0 re-driver and that need the Compliance Mode Quirk.
491 * Systems:
492 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
493 */
494bool xhci_compliance_mode_recovery_timer_quirk_check(void)
495{
496 const char *dmi_product_name, *dmi_sys_vendor;
497
498 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
499 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
500 if (!dmi_product_name || !dmi_sys_vendor)
501 return false;
502
503 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
504 return false;
505
506 if (strstr(dmi_product_name, "Z420") ||
507 strstr(dmi_product_name, "Z620") ||
508 strstr(dmi_product_name, "Z820") ||
509 strstr(dmi_product_name, "Z1 Workstation"))
510 return true;
511
512 return false;
513}
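/*
 * Example of the match above (the DMI strings are firmware-dependent and
 * shown only as a plausible sample): an HP Z420 typically reports
 * DMI_SYS_VENDOR "Hewlett-Packard" and a DMI_PRODUCT_NAME containing
 * "Z420", so the strstr() checks succeed, the function returns true,
 * and xhci_init() arms the compliance mode recovery timer.
 */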
514
515static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
516{
517 return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
518}
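/*
 * Worked example of the port_status_u0 bookkeeping above (port count
 * hypothetical): with num_usb3_ports = 4, the "all ports seen U0" mask
 * is (1 << 4) - 1 = 0xf. Each port observed in U0 sets its own bit, so
 * after ports 1 and 3 have been seen, port_status_u0 = 0b0101. Once it
 * reaches 0xf, compliance_mode_recovery() stops re-arming the timer and
 * the stop/suspend paths no longer need to delete it.
 */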
519
520
521/*
522 * Initialize memory for HCD and xHC (one-time init).
523 *
524 * Program the PAGESIZE register, initialize the device context array, create
525 * device contexts (?), set up a command ring segment (or two?), create event
526 * ring (one for now).
527 */
528int xhci_init(struct usb_hcd *hcd)
529{
530 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
531 int retval = 0;
532
533 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
534 spin_lock_init(&xhci->lock);
535 if (xhci->hci_version == 0x95 && link_quirk) {
536 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
537 "QUIRK: Not clearing Link TRB chain bits.");
538 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
539 } else {
540 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
541 "xHCI doesn't need link TRB QUIRK");
542 }
543 retval = xhci_mem_init(xhci, GFP_KERNEL);
544 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
545
546 /* Initialize compliance mode recovery data if needed */
547 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
548 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
549 compliance_mode_recovery_timer_init(xhci);
550 }
551
552 return retval;
553}
554
555/*-------------------------------------------------------------------------*/
556
557
558static int xhci_run_finished(struct xhci_hcd *xhci)
559{
560 if (xhci_start(xhci)) {
561 xhci_halt(xhci);
562 return -ENODEV;
563 }
564 xhci->shared_hcd->state = HC_STATE_RUNNING;
565 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
566
567 if (xhci->quirks & XHCI_NEC_HOST)
568 xhci_ring_cmd_db(xhci);
569
570 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
571 "Finished xhci_run for USB3 roothub");
572 return 0;
573}
574
575/*
576 * Start the HC after it was halted.
577 *
578 * This function is called by the USB core when the HC driver is added.
579 * Its opposite is xhci_stop().
580 *
581 * xhci_init() must be called once before this function can be called.
582 * Reset the HC, enable device slot contexts, program DCBAAP, and
583 * set command ring pointer and event ring pointer.
584 *
585 * Setup MSI-X vectors and enable interrupts.
586 */
587int xhci_run(struct usb_hcd *hcd)
588{
589 u32 temp;
590 u64 temp_64;
591 int ret;
592 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
593
594 /* Start the xHCI host controller running only after the USB 2.0 roothub
595 * is set up.
596 */
597
598 hcd->uses_new_polling = 1;
599 if (!usb_hcd_is_primary_hcd(hcd))
600 return xhci_run_finished(xhci);
601
602 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
603
604 ret = xhci_try_enable_msi(hcd);
605 if (ret)
606 return ret;
607
608 xhci_dbg(xhci, "Command ring memory map follows:\n");
609 xhci_debug_ring(xhci, xhci->cmd_ring);
610 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
611 xhci_dbg_cmd_ptrs(xhci);
612
613 xhci_dbg(xhci, "ERST memory map follows:\n");
614 xhci_dbg_erst(xhci, &xhci->erst);
615 xhci_dbg(xhci, "Event ring:\n");
616 xhci_debug_ring(xhci, xhci->event_ring);
617 xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
618 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
619 temp_64 &= ~ERST_PTR_MASK;
620 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
621 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
622
623 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
624 "// Set the interrupt modulation register");
625 temp = readl(&xhci->ir_set->irq_control);
626 temp &= ~ER_IRQ_INTERVAL_MASK;
627 temp |= (u32) 160;
628 writel(temp, &xhci->ir_set->irq_control);
629
630 /* Set the HCD state before we enable the irqs */
631 temp = readl(&xhci->op_regs->command);
632 temp |= (CMD_EIE);
633 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
634 "// Enable interrupts, cmd = 0x%x.", temp);
635 writel(temp, &xhci->op_regs->command);
636
637 temp = readl(&xhci->ir_set->irq_pending);
638 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
639 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
640 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
641 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
642 xhci_print_ir_set(xhci, 0);
643
644 if (xhci->quirks & XHCI_NEC_HOST)
645 xhci_queue_vendor_command(xhci, 0, 0, 0,
646 TRB_TYPE(TRB_NEC_GET_FW));
647
648 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
649 "Finished xhci_run for USB2 roothub");
650 return 0;
651}
652
653static void xhci_only_stop_hcd(struct usb_hcd *hcd)
654{
655 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
656
657 spin_lock_irq(&xhci->lock);
658 xhci_halt(xhci);
659
660 /* The shared_hcd is going to be deallocated shortly (the USB core only
661 * calls this function when allocation fails in usb_add_hcd(), or
662 * usb_remove_hcd() is called). So we need to unset xHCI's pointer.
663 */
664 xhci->shared_hcd = NULL;
665 spin_unlock_irq(&xhci->lock);
666}
667
668/*
669 * Stop xHCI driver.
670 *
671 * This function is called by the USB core when the HC driver is removed.
672 * Its opposite is xhci_run().
673 *
674 * Disable device contexts, disable IRQs, and quiesce the HC.
675 * Reset the HC, finish any completed transactions, and cleanup memory.
676 */
677void xhci_stop(struct usb_hcd *hcd)
678{
679 u32 temp;
680 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
681
682 if (!usb_hcd_is_primary_hcd(hcd)) {
683 xhci_only_stop_hcd(xhci->shared_hcd);
684 return;
685 }
686
687 spin_lock_irq(&xhci->lock);
688 /* Make sure the xHC is halted for a USB3 roothub
689 * (xhci_stop() could be called as part of failed init).
690 */
691 xhci_halt(xhci);
692 xhci_reset(xhci);
693 spin_unlock_irq(&xhci->lock);
694
695 xhci_cleanup_msix(xhci);
696
697 /* Deleting Compliance Mode Recovery Timer */
698 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
699 (!(xhci_all_ports_seen_u0(xhci)))) {
700 del_timer_sync(&xhci->comp_mode_recovery_timer);
701 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
702 "%s: compliance mode recovery timer deleted",
703 __func__);
704 }
705
706 if (xhci->quirks & XHCI_AMD_PLL_FIX)
707 usb_amd_dev_put();
708
709 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
710 "// Disabling event ring interrupts");
711 temp = readl(&xhci->op_regs->status);
712 writel(temp & ~STS_EINT, &xhci->op_regs->status);
713 temp = readl(&xhci->ir_set->irq_pending);
714 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
715 xhci_print_ir_set(xhci, 0);
716
717 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
718 xhci_mem_cleanup(xhci);
719 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
720 "xhci_stop completed - status = %x",
721 readl(&xhci->op_regs->status));
722}
723
724/*
725 * Shutdown HC (not bus-specific)
726 *
727 * This is called when the machine is rebooting or halting. We assume that the
728 * machine will be powered off, and the HC's internal state will be reset.
729 * Don't bother to free memory.
730 *
731 * This will only ever be called with the main usb_hcd (the USB3 roothub).
732 */
733void xhci_shutdown(struct usb_hcd *hcd)
734{
735 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
736
737 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
738 usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
739
740 spin_lock_irq(&xhci->lock);
741 xhci_halt(xhci);
742 /* Workaround for spurious wakeups at shutdown with HSW */
743 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
744 xhci_reset(xhci);
745 spin_unlock_irq(&xhci->lock);
746
747 xhci_cleanup_msix(xhci);
748
749 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
750 "xhci_shutdown completed - status = %x",
751 readl(&xhci->op_regs->status));
752
753 /* Yet another workaround for spurious wakeups at shutdown with HSW */
754 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
755 pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
756}
757
758#ifdef CONFIG_PM
759static void xhci_save_registers(struct xhci_hcd *xhci)
760{
761 xhci->s3.command = readl(&xhci->op_regs->command);
762 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
763 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
764 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
765 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
766 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
767 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
768 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
769 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
770}
771
772static void xhci_restore_registers(struct xhci_hcd *xhci)
773{
774 writel(xhci->s3.command, &xhci->op_regs->command);
775 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
776 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
777 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
778 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
779 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
780 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
781 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
782 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
783}
784
785static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
786{
787 u64 val_64;
788
789 /* step 2: initialize command ring buffer */
790 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
791 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
792 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
793 xhci->cmd_ring->dequeue) &
794 (u64) ~CMD_RING_RSVD_BITS) |
795 xhci->cmd_ring->cycle_state;
796 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
797 "// Setting command ring address to 0x%llx",
798 (unsigned long long) val_64);
799 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
800}
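/*
 * Worked example of the masking above (addresses hypothetical, and
 * assuming CMD_RING_RSVD_BITS covers the low six flag bits of the
 * command ring control register, i.e. 0x3f):
 *
 *	dequeue TRB DMA	0x7f001040	(64-byte aligned)
 *	cycle_state	1
 *	value written	(old & 0x3f) | (0x7f001040 & ~0x3f) | 0x1
 *
 * Only 64-byte-aligned pointers survive the mask: a TRB is 16 bytes, so
 * three out of every four TRB addresses would fall inside the reserved
 * low bits - which is exactly why xhci_clear_command_ring() below resets
 * the dequeue pointer to the start of a segment instead of restoring it
 * mid-ring.
 */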
801
802/*
803 * The whole command ring must be cleared to zero when we suspend the host.
804 *
805 * The host doesn't save the command ring pointer in the suspend well, so we
806 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
807 * aligned, because of the reserved bits in the command ring dequeue pointer
808 * register. Therefore, we can't just set the dequeue pointer back in the
809 * middle of the ring (TRBs are 16-byte aligned).
810 */
811static void xhci_clear_command_ring(struct xhci_hcd *xhci)
812{
813 struct xhci_ring *ring;
814 struct xhci_segment *seg;
815
816 ring = xhci->cmd_ring;
817 seg = ring->deq_seg;
818 do {
819 memset(seg->trbs, 0,
820 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
821 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
822 cpu_to_le32(~TRB_CYCLE);
823 seg = seg->next;
824 } while (seg != ring->deq_seg);
825
826 /* Reset the software enqueue and dequeue pointers */
827 ring->deq_seg = ring->first_seg;
828 ring->dequeue = ring->first_seg->trbs;
829 ring->enq_seg = ring->deq_seg;
830 ring->enqueue = ring->dequeue;
831
832 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
833 /*
834 * Ring is now zeroed, so the HW should look for change of ownership
835 * when the cycle bit is set to 1.
836 */
837 ring->cycle_state = 1;
838
839 /*
840 * Reset the hardware dequeue pointer.
841 * Yes, this will need to be re-written after resume, but we're paranoid
842 * and want to make sure the hardware doesn't access bogus memory
843 * because, say, the BIOS or an SMI started the host without changing
844 * the command ring pointers.
845 */
846 xhci_set_cmd_ring_deq(xhci);
847}
848
849/*
850 * Stop HC (not bus-specific)
851 *
852 * This is called when the machine transitions into S3/S4 mode.
853 *
854 */
855int xhci_suspend(struct xhci_hcd *xhci)
856{
857 int rc = 0;
858 unsigned int delay = XHCI_MAX_HALT_USEC;
859 struct usb_hcd *hcd = xhci_to_hcd(xhci);
860 u32 command;
861
862 if (hcd->state != HC_STATE_SUSPENDED ||
863 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
864 return -EINVAL;
865
866 /* Don't poll the roothubs on bus suspend. */
867 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
868 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
869 del_timer_sync(&hcd->rh_timer);
870
871 spin_lock_irq(&xhci->lock);
872 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
873 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
874 /* step 1: stop endpoint */
875 /* skipped: assume port suspend has already been done */
876
877 /* step 2: clear Run/Stop bit */
878 command = readl(&xhci->op_regs->command);
879 command &= ~CMD_RUN;
880 writel(command, &xhci->op_regs->command);
881
882 /* Some chips from Fresco Logic need an extraordinary delay */
883 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
884
885 if (xhci_handshake(xhci, &xhci->op_regs->status,
886 STS_HALT, STS_HALT, delay)) {
887 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
888 spin_unlock_irq(&xhci->lock);
889 return -ETIMEDOUT;
890 }
891 xhci_clear_command_ring(xhci);
892
893 /* step 3: save registers */
894 xhci_save_registers(xhci);
895
896 /* step 4: set CSS flag */
897 command = readl(&xhci->op_regs->command);
898 command |= CMD_CSS;
899 writel(command, &xhci->op_regs->command);
900 if (xhci_handshake(xhci, &xhci->op_regs->status,
901 STS_SAVE, 0, 10 * 1000)) {
902 xhci_warn(xhci, "WARN: xHC save state timeout\n");
903 spin_unlock_irq(&xhci->lock);
904 return -ETIMEDOUT;
905 }
906 spin_unlock_irq(&xhci->lock);
907
908 /*
909 * Deleting Compliance Mode Recovery Timer because the xHCI Host
910 * is about to be suspended.
911 */
912 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
913 (!(xhci_all_ports_seen_u0(xhci)))) {
914 del_timer_sync(&xhci->comp_mode_recovery_timer);
915 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
916 "%s: compliance mode recovery timer deleted",
917 __func__);
918 }
919
920 /* step 5: remove core well power */
921 /* synchronize irq when using MSI-X */
922 xhci_msix_sync_irqs(xhci);
923
924 return rc;
925}
926
927/*
928 * start xHC (not bus-specific)
929 *
930 * This is called when the machine transitions out of S3/S4 mode.
931 *
932 */
933int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
934{
935 u32 command, temp = 0;
936 struct usb_hcd *hcd = xhci_to_hcd(xhci);
937 struct usb_hcd *secondary_hcd;
938 int retval = 0;
939 bool comp_timer_running = false;
940
941 /* Wait a bit if either of the roothubs need to settle from the
942 * transition into bus suspend.
943 */
944 if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
945 time_before(jiffies,
946 xhci->bus_state[1].next_statechange))
947 msleep(100);
948
949 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
950 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
951
952 spin_lock_irq(&xhci->lock);
953 if (xhci->quirks & XHCI_RESET_ON_RESUME)
954 hibernated = true;
955
956 if (!hibernated) {
957 /* step 1: restore register */
958 xhci_restore_registers(xhci);
959 /* step 2: initialize command ring buffer */
960 xhci_set_cmd_ring_deq(xhci);
961 /* step 3: restore state and start state*/
962 /* step 3: set CRS flag */
963 command = readl(&xhci->op_regs->command);
964 command |= CMD_CRS;
965 writel(command, &xhci->op_regs->command);
966 if (xhci_handshake(xhci, &xhci->op_regs->status,
967 STS_RESTORE, 0, 10 * 1000)) {
968 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
969 spin_unlock_irq(&xhci->lock);
970 return -ETIMEDOUT;
971 }
972 temp = readl(&xhci->op_regs->status);
973 }
974
975 /* If restore operation fails, re-initialize the HC during resume */
976 if ((temp & STS_SRE) || hibernated) {
977
978 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
979 !(xhci_all_ports_seen_u0(xhci))) {
980 del_timer_sync(&xhci->comp_mode_recovery_timer);
981 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
982 "Compliance Mode Recovery Timer deleted!");
983 }
984
985 /* Let the USB core know _both_ roothubs lost power. */
986 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
987 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
988
989 xhci_dbg(xhci, "Stop HCD\n");
990 xhci_halt(xhci);
991 xhci_reset(xhci);
992 spin_unlock_irq(&xhci->lock);
993 xhci_cleanup_msix(xhci);
994
995 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
996 temp = readl(&xhci->op_regs->status);
997 writel(temp & ~STS_EINT, &xhci->op_regs->status);
998 temp = readl(&xhci->ir_set->irq_pending);
999 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1000 xhci_print_ir_set(xhci, 0);
1001
1002 xhci_dbg(xhci, "cleaning up memory\n");
1003 xhci_mem_cleanup(xhci);
1004 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1005 readl(&xhci->op_regs->status));
1006
1007 /* USB core calls the PCI reinit and start functions twice:
1008 * first with the primary HCD, and then with the secondary HCD.
1009 * If we don't do the same, the host will never be started.
1010 */
1011 if (!usb_hcd_is_primary_hcd(hcd))
1012 secondary_hcd = hcd;
1013 else
1014 secondary_hcd = xhci->shared_hcd;
1015
1016 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1017 retval = xhci_init(hcd->primary_hcd);
1018 if (retval)
1019 return retval;
1020 comp_timer_running = true;
1021
1022 xhci_dbg(xhci, "Start the primary HCD\n");
1023 retval = xhci_run(hcd->primary_hcd);
1024 if (!retval) {
1025 xhci_dbg(xhci, "Start the secondary HCD\n");
1026 retval = xhci_run(secondary_hcd);
1027 }
1028 hcd->state = HC_STATE_SUSPENDED;
1029 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1030 goto done;
1031 }
1032
1033 /* step 4: set Run/Stop bit */
1034 command = readl(&xhci->op_regs->command);
1035 command |= CMD_RUN;
1036 writel(command, &xhci->op_regs->command);
1037 xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
1038 0, 250 * 1000);
1039
1040 /* step 5: walk topology and initialize portsc,
1041 * portpmsc and portli
1042 */
1043 /* this is done in bus_resume */
1044
1045 /* step 6: restart each of the previously
1046 * Running endpoints by ringing their doorbells
1047 */
1048
1049 spin_unlock_irq(&xhci->lock);
1050
1051 done:
1052 if (retval == 0) {
1053 usb_hcd_resume_root_hub(hcd);
1054 usb_hcd_resume_root_hub(xhci->shared_hcd);
1055 }
1056
1057 /*
1058 * If the system is subject to the quirk, the compliance mode timer must
1059 * always be re-initialized after a system resume, since the ports can
1060 * suffer the compliance mode issue again. It doesn't matter whether the
1061 * ports had already entered U0 before the system was suspended.
1062 */
1063 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1064 compliance_mode_recovery_timer_init(xhci);
1065
1066 /* Re-enable port polling. */
1067 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1068 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1069 usb_hcd_poll_rh_status(hcd);
1070
1071 return retval;
1072}
1073#endif /* CONFIG_PM */
1074
1075/*-------------------------------------------------------------------------*/
1076
1077/**
1078 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
1079 * HCDs. Find the index for an endpoint given its descriptor. Use the return
1080 * value to left shift 1 for the bitmask.
1081 *
1082 * Index = (epnum * 2) + direction - 1,
1083 * where direction = 0 for OUT, 1 for IN.
1084 * For control endpoints, the IN index is used (OUT index is unused), so
1085 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
1086 */
1087unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1088{
1089 unsigned int index;
1090 if (usb_endpoint_xfer_control(desc))
1091 index = (unsigned int) (usb_endpoint_num(desc)*2);
1092 else
1093 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1094 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1095 return index;
1096}
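/*
 * Worked examples of the index formula above (endpoint numbers are
 * arbitrary samples):
 *
 *	ep 0x81 (IN,  epnum 1):	(1 * 2) + 1 - 1 = 2
 *	ep 0x02 (OUT, epnum 2):	(2 * 2) + 0 - 1 = 3
 *	ep 0x00 (control ep 0):	control eps use the IN form, (0 * 2) = 0
 *
 * OUT and IN halves of the same endpoint number therefore land on
 * adjacent indices, covering the 31 device context entries 0..30.
 */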
1097
1098/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
1099 * address from the XHCI endpoint index.
1100 */
1101unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1102{
1103 unsigned int number = DIV_ROUND_UP(ep_index, 2);
1104 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1105 return direction | number;
1106}
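/*
 * Round-trip example for the pair of helpers (sample values): ep_index 2
 * gives number = DIV_ROUND_UP(2, 2) = 1 and, being even, direction =
 * USB_DIR_IN, i.e. address 0x81; ep_index 3 gives number = 2 and
 * direction = USB_DIR_OUT, i.e. address 0x02 - the inverses of the
 * examples shown after xhci_get_endpoint_index() above.
 */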
1107
1108/* Find the flag for this endpoint (for use in the control context). Use the
1109 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1110 * bit 1, etc.
1111 */
1112unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1113{
1114 return 1 << (xhci_get_endpoint_index(desc) + 1);
1115}
1116
1117/* Find the flag for this endpoint (for use in the control context). Use the
1118 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1119 * bit 1, etc.
1120 */
1121unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1122{
1123 return 1 << (ep_index + 1);
1124}
1125
1126/* Compute the last valid endpoint context index. Basically, this is the
1127 * endpoint index plus one. For slot contexts with more than one valid endpoint,
1128 * we find the most significant bit set in the added contexts flags.
1129 * e.g. ep 1 IN (with endpoint address 0x81) => added_ctxs = 0b1000
1130 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
1131 */
1132unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1133{
1134 return fls(added_ctxs) - 1;
1135}
1136
1137/* Returns 1 if the arguments are OK;
1138 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
1139 */
1140static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1141 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1142 const char *func) {
1143 struct xhci_hcd *xhci;
1144 struct xhci_virt_device *virt_dev;
1145
1146 if (!hcd || (check_ep && !ep) || !udev) {
1147 pr_debug("xHCI %s called with invalid args\n", func);
1148 return -EINVAL;
1149 }
1150 if (!udev->parent) {
1151 pr_debug("xHCI %s called for root hub\n", func);
1152 return 0;
1153 }
1154
1155 xhci = hcd_to_xhci(hcd);
1156 if (check_virt_dev) {
1157 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1158 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1159 func);
1160 return -EINVAL;
1161 }
1162
1163 virt_dev = xhci->devs[udev->slot_id];
1164 if (virt_dev->udev != udev) {
1165 xhci_dbg(xhci, "xHCI %s called with udev and "
1166 "virt_dev that do not match\n", func);
1167 return -EINVAL;
1168 }
1169 }
1170
1171 if (xhci->xhc_state & XHCI_STATE_HALTED)
1172 return -ENODEV;
1173
1174 return 1;
1175}
1176
1177static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1178 struct usb_device *udev, struct xhci_command *command,
1179 bool ctx_change, bool must_succeed);
1180
1181/*
1182 * Full speed devices may have a max packet size greater than 8 bytes, but the
1183 * USB core doesn't know that until it reads the first 8 bytes of the
1184 * descriptor. If the usb_device's max packet size changes after that point,
1185 * we need to issue an evaluate context command and wait on it.
1186 */
1187static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1188 unsigned int ep_index, struct urb *urb)
1189{
1190 struct xhci_container_ctx *in_ctx;
1191 struct xhci_container_ctx *out_ctx;
1192 struct xhci_input_control_ctx *ctrl_ctx;
1193 struct xhci_ep_ctx *ep_ctx;
1194 int max_packet_size;
1195 int hw_max_packet_size;
1196 int ret = 0;
1197
1198 out_ctx = xhci->devs[slot_id]->out_ctx;
1199 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1200 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1201 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1202 if (hw_max_packet_size != max_packet_size) {
1203 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1204 "Max Packet Size for ep 0 changed.");
1205 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1206 "Max packet size in usb_device = %d",
1207 max_packet_size);
1208 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1209 "Max packet size in xHCI HW = %d",
1210 hw_max_packet_size);
1211 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1212 "Issuing evaluate context command.");
1213
1214 /* Set up the input context flags for the command */
1215 /* FIXME: This won't work if a non-default control endpoint
1216 * changes max packet sizes.
1217 */
1218 in_ctx = xhci->devs[slot_id]->in_ctx;
1219 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1220 if (!ctrl_ctx) {
1221 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1222 __func__);
1223 return -ENOMEM;
1224 }
1225 /* Set up the modified control endpoint 0 */
1226 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1227 xhci->devs[slot_id]->out_ctx, ep_index);
1228
1229 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1230 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1231 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1232
1233 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1234 ctrl_ctx->drop_flags = 0;
1235
1236 xhci_dbg(xhci, "Slot %d input context\n", slot_id);
1237 xhci_dbg_ctx(xhci, in_ctx, ep_index);
1238 xhci_dbg(xhci, "Slot %d output context\n", slot_id);
1239 xhci_dbg_ctx(xhci, out_ctx, ep_index);
1240
1241 ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
1242 true, false);
1243
1244 /* Clean up the input context for later use by bandwidth
1245 * functions.
1246 */
1247 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1248 }
1249 return ret;
1250}
1251
1252/*
1253 * Non-error returns are a promise to giveback() the URB later;
1254 * we drop ownership so the next owner (or URB unlink) can get it.
1255 */
1256int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1257{
1258 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1259 struct xhci_td *buffer;
1260 unsigned long flags;
1261 int ret = 0;
1262 unsigned int slot_id, ep_index;
1263 struct urb_priv *urb_priv;
1264 int size, i;
1265
1266 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1267 true, true, __func__) <= 0)
1268 return -EINVAL;
1269
1270 slot_id = urb->dev->slot_id;
1271 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1272
1273 if (!HCD_HW_ACCESSIBLE(hcd)) {
1274 if (!in_interrupt())
1275 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1276 ret = -ESHUTDOWN;
1277 goto exit;
1278 }
1279
1280 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1281 size = urb->number_of_packets;
1282 else
1283 size = 1;
1284
1285 urb_priv = kzalloc(sizeof(struct urb_priv) +
1286 size * sizeof(struct xhci_td *), mem_flags);
1287 if (!urb_priv)
1288 return -ENOMEM;
1289
1290 buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
1291 if (!buffer) {
1292 kfree(urb_priv);
1293 return -ENOMEM;
1294 }
1295
1296 for (i = 0; i < size; i++) {
1297 urb_priv->td[i] = buffer;
1298 buffer++;
1299 }
1300
1301 urb_priv->length = size;
1302 urb_priv->td_cnt = 0;
1303 urb->hcpriv = urb_priv;
1304
1305 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1306 /* Check to see if the max packet size for the default control
1307 * endpoint changed during FS device enumeration
1308 */
1309 if (urb->dev->speed == USB_SPEED_FULL) {
1310 ret = xhci_check_maxpacket(xhci, slot_id,
1311 ep_index, urb);
1312 if (ret < 0) {
1313 xhci_urb_free_priv(xhci, urb_priv);
1314 urb->hcpriv = NULL;
1315 return ret;
1316 }
1317 }
1318
1319 /* We have a spinlock and interrupts disabled, so we must pass
1320 * atomic context to this function, which may allocate memory.
1321 */
1322 spin_lock_irqsave(&xhci->lock, flags);
1323 if (xhci->xhc_state & XHCI_STATE_DYING)
1324 goto dying;
1325 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1326 slot_id, ep_index);
1327 if (ret)
1328 goto free_priv;
1329 spin_unlock_irqrestore(&xhci->lock, flags);
1330 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1331 spin_lock_irqsave(&xhci->lock, flags);
1332 if (xhci->xhc_state & XHCI_STATE_DYING)
1333 goto dying;
1334 if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1335 EP_GETTING_STREAMS) {
1336 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1337 "is transitioning to using streams.\n");
1338 ret = -EINVAL;
1339 } else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1340 EP_GETTING_NO_STREAMS) {
1341 xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1342 "is transitioning to "
1343 "not having streams.\n");
1344 ret = -EINVAL;
1345 } else {
1346 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1347 slot_id, ep_index);
1348 }
1349 if (ret)
1350 goto free_priv;
1351 spin_unlock_irqrestore(&xhci->lock, flags);
1352 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1353 spin_lock_irqsave(&xhci->lock, flags);
1354 if (xhci->xhc_state & XHCI_STATE_DYING)
1355 goto dying;
1356 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1357 slot_id, ep_index);
1358 if (ret)
1359 goto free_priv;
1360 spin_unlock_irqrestore(&xhci->lock, flags);
1361 } else {
1362 spin_lock_irqsave(&xhci->lock, flags);
1363 if (xhci->xhc_state & XHCI_STATE_DYING)
1364 goto dying;
1365 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1366 slot_id, ep_index);
1367 if (ret)
1368 goto free_priv;
1369 spin_unlock_irqrestore(&xhci->lock, flags);
1370 }
1371exit:
1372 return ret;
1373dying:
1374 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1375 "non-responsive xHCI host.\n",
1376 urb->ep->desc.bEndpointAddress, urb);
1377 ret = -ESHUTDOWN;
1378free_priv:
1379 xhci_urb_free_priv(xhci, urb_priv);
1380 urb->hcpriv = NULL;
1381 spin_unlock_irqrestore(&xhci->lock, flags);
1382 return ret;
1383}
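/*
 * Layout sketch for the urb_priv allocation above (packet count
 * hypothetical): for an isochronous URB with number_of_packets = 3,
 * urb_priv is a single allocation holding the header plus a 3-entry
 * pointer array, and "buffer" is a second allocation of 3 contiguous
 * struct xhci_td:
 *
 *	urb_priv->td[0] -> buffer[0]
 *	urb_priv->td[1] -> buffer[1]
 *	urb_priv->td[2] -> buffer[2]
 *
 * Non-isochronous URBs always use size = 1, and td_cnt records how many
 * of the TDs have completed so far.
 */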
1384
1385/* Get the right ring for the given URB.
1386 * If the endpoint supports streams, boundary check the URB's stream ID.
1387 * If the endpoint doesn't support streams, return the singular endpoint ring.
1388 */
1389static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1390 struct urb *urb)
1391{
1392 unsigned int slot_id;
1393 unsigned int ep_index;
1394 unsigned int stream_id;
1395 struct xhci_virt_ep *ep;
1396
1397 slot_id = urb->dev->slot_id;
1398 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1399 stream_id = urb->stream_id;
1400 ep = &xhci->devs[slot_id]->eps[ep_index];
1401 /* Common case: no streams */
1402 if (!(ep->ep_state & EP_HAS_STREAMS))
1403 return ep->ring;
1404
1405 if (stream_id == 0) {
1406 xhci_warn(xhci,
1407 "WARN: Slot ID %u, ep index %u has streams, "
1408 "but URB has no stream ID.\n",
1409 slot_id, ep_index);
1410 return NULL;
1411 }
1412
1413 if (stream_id < ep->stream_info->num_streams)
1414 return ep->stream_info->stream_rings[stream_id];
1415
1416 xhci_warn(xhci,
1417 "WARN: Slot ID %u, ep index %u has "
1418 "stream IDs 1 to %u allocated, "
1419 "but stream ID %u is requested.\n",
1420 slot_id, ep_index,
1421 ep->stream_info->num_streams - 1,
1422 stream_id);
1423 return NULL;
1424}
1425
1426/*
1427 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
1428 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
1429 * should pick up where it left off in the TD, unless a Set Transfer Ring
1430 * Dequeue Pointer is issued.
1431 *
1432 * The TRBs that make up the buffers for the canceled URB will be "removed" from
1433 * the ring. Since the ring is a contiguous structure, they can't be physically
1434 * removed. Instead, there are three cases to handle:
1435 *
1436 * 1) If the HC is in the middle of processing the URB to be canceled, we
1437 * simply move the ring's dequeue pointer past those TRBs using the Set
1438 * Transfer Ring Dequeue Pointer command. This will be the common case,
1439 * when drivers timeout on the last submitted URB and attempt to cancel.
1440 *
1441 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
1442 * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
1443 * HC will need to invalidate any TRBs it has cached after the stop
1444 * endpoint command, as noted in the xHCI 0.95 errata.
1445 *
1446 * 3) The TD may have completed by the time the Stop Endpoint Command
1447 * completes, so software needs to handle that case too.
1448 *
1449 * This function should protect against the TD enqueueing code ringing the
1450 * doorbell while this code is waiting for a Stop Endpoint command to complete.
1451 * It also needs to account for multiple cancellations happening at the
1452 * same time on the same endpoint.
1453 *
1454 * Note that this function can be called in any context, or so says
1455 * usb_hcd_unlink_urb()
1456 */
1457int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1458{
1459 unsigned long flags;
1460 int ret, i;
1461 u32 temp;
1462 struct xhci_hcd *xhci;
1463 struct urb_priv *urb_priv;
1464 struct xhci_td *td;
1465 unsigned int ep_index;
1466 struct xhci_ring *ep_ring;
1467 struct xhci_virt_ep *ep;
1468
1469 xhci = hcd_to_xhci(hcd);
1470 spin_lock_irqsave(&xhci->lock, flags);
1471 /* Make sure the URB hasn't completed or been unlinked already */
1472 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1473 if (ret || !urb->hcpriv)
1474 goto done;
1475 temp = readl(&xhci->op_regs->status);
1476 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1477 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1478 "HW died, freeing TD.");
1479 urb_priv = urb->hcpriv;
1480 for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1481 td = urb_priv->td[i];
1482 if (!list_empty(&td->td_list))
1483 list_del_init(&td->td_list);
1484 if (!list_empty(&td->cancelled_td_list))
1485 list_del_init(&td->cancelled_td_list);
1486 }
1487
1488 usb_hcd_unlink_urb_from_ep(hcd, urb);
1489 spin_unlock_irqrestore(&xhci->lock, flags);
1490 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1491 xhci_urb_free_priv(xhci, urb_priv);
1492 return ret;
1493 }
1494 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1495 (xhci->xhc_state & XHCI_STATE_HALTED)) {
1496 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1497 "Ep 0x%x: URB %p to be canceled on "
1498 "non-responsive xHCI host.",
1499 urb->ep->desc.bEndpointAddress, urb);
1500 /* Let the stop endpoint command watchdog timer (which set this
1501 * state) finish cleaning up the endpoint TD lists. We must
1502 * have caught it in the middle of dropping a lock and giving
1503 * back an URB.
1504 */
1505 goto done;
1506 }
1507
1508 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1509 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1510 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1511 if (!ep_ring) {
1512 ret = -EINVAL;
1513 goto done;
1514 }
1515
1516 urb_priv = urb->hcpriv;
1517 i = urb_priv->td_cnt;
1518 if (i < urb_priv->length)
1519 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1520 "Cancel URB %p, dev %s, ep 0x%x, "
1521 "starting at offset 0x%llx",
1522 urb, urb->dev->devpath,
1523 urb->ep->desc.bEndpointAddress,
1524 (unsigned long long) xhci_trb_virt_to_dma(
1525 urb_priv->td[i]->start_seg,
1526 urb_priv->td[i]->first_trb));
1527
1528 for (; i < urb_priv->length; i++) {
1529 td = urb_priv->td[i];
1530 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1531 }
1532
1533 /* Queue a stop endpoint command, but only if this is
1534 * the first cancellation to be handled.
1535 */
1536 if (!(ep->ep_state & EP_HALT_PENDING)) {
1537 ep->ep_state |= EP_HALT_PENDING;
1538 ep->stop_cmds_pending++;
1539 ep->stop_cmd_timer.expires = jiffies +
1540 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1541 add_timer(&ep->stop_cmd_timer);
1542 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
1543 xhci_ring_cmd_db(xhci);
1544 }
1545done:
1546 spin_unlock_irqrestore(&xhci->lock, flags);
1547 return ret;
1548}
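/*
 * Driver-side usage sketch (illustrative only; usb_unlink_urb() and
 * usb_kill_urb() are the standard USB core entry points, "my_urb" is a
 * hypothetical URB owned by some class driver):
 *
 *	usb_unlink_urb(my_urb);		asynchronous: returns -EINPROGRESS,
 *					the completion handler runs later
 *	usb_kill_urb(my_urb);		synchronous: sleeps until the URB
 *					has been given back
 *
 * Both paths reach this function via usb_hcd_unlink_urb(), which is why
 * the comment above stresses that it may be called in any context.
 */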
1549
1550/* Drop an endpoint from a new bandwidth configuration for this device.
1551 * Only one call to this function is allowed per endpoint before
1552 * check_bandwidth() or reset_bandwidth() must be called.
1553 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1554 * add the endpoint to the schedule with possibly new parameters denoted by a
1555 * different endpoint descriptor in usb_host_endpoint.
1556 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1557 * not allowed.
1558 *
1559 * The USB core will not allow URBs to be queued to an endpoint that is being
1560 * disabled, so there's no need for mutual exclusion to protect
1561 * the xhci->devs[slot_id] structure.
1562 */
1563int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1564 struct usb_host_endpoint *ep)
1565{
1566 struct xhci_hcd *xhci;
1567 struct xhci_container_ctx *in_ctx, *out_ctx;
1568 struct xhci_input_control_ctx *ctrl_ctx;
1569 struct xhci_slot_ctx *slot_ctx;
1570 unsigned int last_ctx;
1571 unsigned int ep_index;
1572 struct xhci_ep_ctx *ep_ctx;
1573 u32 drop_flag;
1574 u32 new_add_flags, new_drop_flags, new_slot_info;
1575 int ret;
1576
1577 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1578 if (ret <= 0)
1579 return ret;
1580 xhci = hcd_to_xhci(hcd);
1581 if (xhci->xhc_state & XHCI_STATE_DYING)
1582 return -ENODEV;
1583
1584 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1585 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1586 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1587 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1588 __func__, drop_flag);
1589 return 0;
1590 }
1591
1592 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1593 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1594 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1595 if (!ctrl_ctx) {
1596 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1597 __func__);
1598 return 0;
1599 }
1600
1601 ep_index = xhci_get_endpoint_index(&ep->desc);
1602 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1603 /* If the HC already knows the endpoint is disabled,
1604 * or the HCD has noted it is disabled, ignore this request
1605 */
1606 if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1607 cpu_to_le32(EP_STATE_DISABLED)) ||
1608 le32_to_cpu(ctrl_ctx->drop_flags) &
1609 xhci_get_endpoint_flag(&ep->desc)) {
1610 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1611 __func__, ep);
1612 return 0;
1613 }
1614
1615 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1616 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1617
1618 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1619 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1620
1621 last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
1622 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1623 /* Update the last valid endpoint context, if we deleted the last one */
1624 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1625 LAST_CTX(last_ctx)) {
1626 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1627 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1628 }
1629 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1630
1631 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1632
1633 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1634 (unsigned int) ep->desc.bEndpointAddress,
1635 udev->slot_id,
1636 (unsigned int) new_drop_flags,
1637 (unsigned int) new_add_flags,
1638 (unsigned int) new_slot_info);
1639 return 0;
1640}
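/*
 * Typical calling sequence, sketching how the USB core's bandwidth
 * allocation drives this (a paraphrase of usb_hcd_alloc_bandwidth()'s
 * behaviour, not a verbatim copy):
 *
 *	for each endpoint in the old setting:	xhci_drop_endpoint()
 *	for each endpoint in the new setting:	xhci_add_endpoint()
 *	xhci_check_bandwidth()	issues the configure endpoint command
 *	(or xhci_reset_bandwidth() to throw the staged changes away)
 *
 * Until check_bandwidth() runs, drop/add only edit the input context
 * flags; nothing has been sent to the hardware yet.
 */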
1641
1642/* Add an endpoint to a new possible bandwidth configuration for this device.
1643 * Only one call to this function is allowed per endpoint before
1644 * check_bandwidth() or reset_bandwidth() must be called.
1645 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1646 * add the endpoint to the schedule with possibly new parameters denoted by a
1647 * different endpoint descriptor in usb_host_endpoint.
1648 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1649 * not allowed.
1650 *
1651 * The USB core will not allow URBs to be queued to an endpoint until the
1652 * configuration or alt setting is installed in the device, so there's no need
1653 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1654 */
1655int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1656 struct usb_host_endpoint *ep)
1657{
1658 struct xhci_hcd *xhci;
1659 struct xhci_container_ctx *in_ctx, *out_ctx;
1660 unsigned int ep_index;
1661 struct xhci_slot_ctx *slot_ctx;
1662 struct xhci_input_control_ctx *ctrl_ctx;
1663 u32 added_ctxs;
1664 unsigned int last_ctx;
1665 u32 new_add_flags, new_drop_flags, new_slot_info;
1666 struct xhci_virt_device *virt_dev;
1667 int ret = 0;
1668
1669 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1670 if (ret <= 0) {
1671 /* So we won't queue a reset ep command for a root hub */
1672 ep->hcpriv = NULL;
1673 return ret;
1674 }
1675 xhci = hcd_to_xhci(hcd);
1676 if (xhci->xhc_state & XHCI_STATE_DYING)
1677 return -ENODEV;
1678
1679 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1680 last_ctx = xhci_last_valid_endpoint(added_ctxs);
1681 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1682 /* FIXME when we have to issue an evaluate endpoint command to
1683 * deal with ep0 max packet size changing once we get the
1684 * descriptors
1685 */
1686 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1687 __func__, added_ctxs);
1688 return 0;
1689 }
1690
1691 virt_dev = xhci->devs[udev->slot_id];
1692 in_ctx = virt_dev->in_ctx;
1693 out_ctx = virt_dev->out_ctx;
1694 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1695 if (!ctrl_ctx) {
1696 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1697 __func__);
1698 return 0;
1699 }
1700
1701 ep_index = xhci_get_endpoint_index(&ep->desc);
1702 /* If this endpoint is already in use, and the upper layers are trying
1703 * to add it again without dropping it, reject the addition.
1704 */
1705 if (virt_dev->eps[ep_index].ring &&
1706 !(le32_to_cpu(ctrl_ctx->drop_flags) &
1707 xhci_get_endpoint_flag(&ep->desc))) {
1708 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1709 "without dropping it.\n",
1710 (unsigned int) ep->desc.bEndpointAddress);
1711 return -EINVAL;
1712 }
1713
1714 /* If the HCD has already noted the endpoint is enabled,
1715 * ignore this request.
1716 */
1717 if (le32_to_cpu(ctrl_ctx->add_flags) &
1718 xhci_get_endpoint_flag(&ep->desc)) {
1719 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1720 __func__, ep);
1721 return 0;
1722 }
1723
1724 /*
1725 * Configuration and alternate setting changes must be done in
1726 * process context, not interrupt context (or so the documentation
1727 * for usb_set_interface() and usb_set_configuration() claims).
1728 */
1729 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1730 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1731 __func__, ep->desc.bEndpointAddress);
1732 return -ENOMEM;
1733 }
1734
1735 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1736 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1737
1738 /* If xhci_endpoint_disable() was called for this endpoint, but the
1739 * xHC hasn't been notified yet through the check_bandwidth() call,
1740 * this re-adds a new state for the endpoint from the new endpoint
1741 * descriptors. We must drop and re-add this endpoint, so we leave the
1742 * drop flags alone.
1743 */
1744 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1745
1746 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1747 /* Update the last valid endpoint context, if we just added one past */
1748 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1749 LAST_CTX(last_ctx)) {
1750 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1751 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1752 }
1753 new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1754
1755 /* Store the usb_device pointer for later use */
1756 ep->hcpriv = udev;
1757
1758 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1759 (unsigned int) ep->desc.bEndpointAddress,
1760 udev->slot_id,
1761 (unsigned int) new_drop_flags,
1762 (unsigned int) new_add_flags,
1763 (unsigned int) new_slot_info);
1764 return 0;
1765}
1766
1767static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1768{
1769 struct xhci_input_control_ctx *ctrl_ctx;
1770 struct xhci_ep_ctx *ep_ctx;
1771 struct xhci_slot_ctx *slot_ctx;
1772 int i;
1773
1774 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1775 if (!ctrl_ctx) {
1776 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1777 __func__);
1778 return;
1779 }
1780
1781 /* When a device's add flag and drop flag are zero, any subsequent
1782 * configure endpoint command will leave that endpoint's state
1783 * untouched. Make sure we don't leave any old state in the input
1784 * endpoint contexts.
1785 */
1786 ctrl_ctx->drop_flags = 0;
1787 ctrl_ctx->add_flags = 0;
1788 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1789 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1790 /* Endpoint 0 is always valid */
1791 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1792 for (i = 1; i < 31; ++i) {
1793 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1794 ep_ctx->ep_info = 0;
1795 ep_ctx->ep_info2 = 0;
1796 ep_ctx->deq = 0;
1797 ep_ctx->tx_info = 0;
1798 }
1799}
1800
1801static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1802 struct usb_device *udev, u32 *cmd_status)
1803{
1804 int ret;
1805
1806 switch (*cmd_status) {
1807 case COMP_ENOMEM:
1808 dev_warn(&udev->dev, "Not enough host controller resources "
1809 "for new device state.\n");
1810 ret = -ENOMEM;
1811 /* FIXME: can we allocate more resources for the HC? */
1812 break;
1813 case COMP_BW_ERR:
1814 case COMP_2ND_BW_ERR:
1815 dev_warn(&udev->dev, "Not enough bandwidth "
1816 "for new device state.\n");
1817 ret = -ENOSPC;
1818 /* FIXME: can we go back to the old state? */
1819 break;
1820 case COMP_TRB_ERR:
1821 /* the HCD set up something wrong */
1822 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1823 "add flag = 1, "
1824 "and endpoint is not disabled.\n");
1825 ret = -EINVAL;
1826 break;
1827 case COMP_DEV_ERR:
1828 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1829 "configure command.\n");
1830 ret = -ENODEV;
1831 break;
1832 case COMP_SUCCESS:
1833 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1834 "Successful Endpoint Configure command");
1835 ret = 0;
1836 break;
1837 default:
1838 xhci_err(xhci, "ERROR: unexpected command completion "
1839 "code 0x%x.\n", *cmd_status);
1840 ret = -EINVAL;
1841 break;
1842 }
1843 return ret;
1844}
1845
1846static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1847 struct usb_device *udev, u32 *cmd_status)
1848{
1849 int ret;
1850 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1851
1852 switch (*cmd_status) {
1853 case COMP_EINVAL:
1854 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1855 "context command.\n");
1856 ret = -EINVAL;
1857 break;
1858 case COMP_EBADSLT:
1859 dev_warn(&udev->dev, "WARN: slot not enabled for "
1860 "evaluate context command.\n");
1861 ret = -EINVAL;
1862 break;
1863 case COMP_CTX_STATE:
1864 dev_warn(&udev->dev, "WARN: invalid context state for "
1865 "evaluate context command.\n");
1866 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1867 ret = -EINVAL;
1868 break;
1869 case COMP_DEV_ERR:
1870 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1871 "context command.\n");
1872 ret = -ENODEV;
1873 break;
1874 case COMP_MEL_ERR:
1875 /* Max Exit Latency too large error */
1876 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1877 ret = -EINVAL;
1878 break;
1879 case COMP_SUCCESS:
1880 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1881 "Successful evaluate context command");
1882 ret = 0;
1883 break;
1884 default:
1885 xhci_err(xhci, "ERROR: unexpected command completion "
1886 "code 0x%x.\n", *cmd_status);
1887 ret = -EINVAL;
1888 break;
1889 }
1890 return ret;
1891}
1892
1893static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1894 struct xhci_input_control_ctx *ctrl_ctx)
1895{
1896 u32 valid_add_flags;
1897 u32 valid_drop_flags;
1898
1899 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1900 * (bit 1). The default control endpoint is added during the Address
1901 * Device command and is never removed until the slot is disabled.
1902 */
1903 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1904 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1905
1906 /* Use hweight32 to count the number of ones in the add flags, or
1907 * number of endpoints added. Don't count endpoints that are changed
1908 * (both added and dropped).
1909 */
1910 return hweight32(valid_add_flags) -
1911 hweight32(valid_add_flags & valid_drop_flags);
1912}
1913
1914static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1915 struct xhci_input_control_ctx *ctrl_ctx)
1916{
1917 u32 valid_add_flags;
1918 u32 valid_drop_flags;
1919
1920 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1921 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1922
1923 return hweight32(valid_drop_flags) -
1924 hweight32(valid_add_flags & valid_drop_flags);
1925}
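/*
 * Worked example for the two counters above (flag values hypothetical):
 * add_flags = 0x18 (ctx indices 3 and 4 added) and drop_flags = 0x10
 * (ctx index 4 also dropped, i.e. endpoint 4 is merely changed):
 *
 *	valid_add_flags  = 0x18 >> 2 = 0x6	hweight32() = 2
 *	valid_drop_flags = 0x10 >> 2 = 0x4	hweight32() = 1
 *	new endpoints     = 2 - hweight32(0x6 & 0x4) = 1
 *	dropped endpoints = 1 - hweight32(0x6 & 0x4) = 0
 *
 * The changed endpoint counts toward neither total, as intended.
 */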
1926
1927/*
1928 * We need to reserve the new number of endpoints before the configure endpoint
1929 * command completes. We can't subtract the dropped endpoints from the number
1930 * of active endpoints until the command completes because we can oversubscribe
1931 * the host in this case:
1932 *
1933 * - the first configure endpoint command drops more endpoints than it adds
1934 * - a second configure endpoint command that adds more endpoints is queued
1935 * - the first configure endpoint command fails, so the config is unchanged
1936 * - the second command may succeed, even though there aren't enough resources
1937 *
1938 * Must be called with xhci->lock held.
1939 */
1940static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1941 struct xhci_input_control_ctx *ctrl_ctx)
1942{
1943 u32 added_eps;
1944
1945 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1946 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1947 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1948 "Not enough ep ctxs: "
1949 "%u active, need to add %u, limit is %u.",
1950 xhci->num_active_eps, added_eps,
1951 xhci->limit_active_eps);
1952 return -ENOMEM;
1953 }
1954 xhci->num_active_eps += added_eps;
1955 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1956 "Adding %u ep ctxs, %u now active.", added_eps,
1957 xhci->num_active_eps);
1958 return 0;
1959}
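/*
 * Numeric example of the reservation above (limits hypothetical): with
 * limit_active_eps = 64 and num_active_eps = 60, a command adding 6
 * endpoints is refused outright (60 + 6 > 64), while one adding 3 is
 * reserved immediately (num_active_eps becomes 63) and is only trimmed
 * back by xhci_free_host_resources() on failure or by
 * xhci_finish_resource_reservation() on success.
 */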
1960
1961/*
1962 * The configure endpoint command was failed by the xHC for some other reason,
1963 * so we need to revert the resources that the failed configuration would have used.
1964 *
1965 * Must be called with xhci->lock held.
1966 */
1967static void xhci_free_host_resources(struct xhci_hcd *xhci,
1968 struct xhci_input_control_ctx *ctrl_ctx)
1969{
1970 u32 num_failed_eps;
1971
1972 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1973 xhci->num_active_eps -= num_failed_eps;
1974 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1975 "Removing %u failed ep ctxs, %u now active.",
1976 num_failed_eps,
1977 xhci->num_active_eps);
1978}
1979
1980/*
1981 * Now that the command has completed, clean up the active endpoint count by
1982 * subtracting out the endpoints that were dropped (but not changed).
1983 *
1984 * Must be called with xhci->lock held.
1985 */
1986static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1987 struct xhci_input_control_ctx *ctrl_ctx)
1988{
1989 u32 num_dropped_eps;
1990
1991 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
1992 xhci->num_active_eps -= num_dropped_eps;
1993 if (num_dropped_eps)
1994 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1995 "Removing %u dropped ep ctxs, %u now active.",
1996 num_dropped_eps,
1997 xhci->num_active_eps);
1998}
1999
2000static unsigned int xhci_get_block_size(struct usb_device *udev)
2001{
2002 switch (udev->speed) {
2003 case USB_SPEED_LOW:
2004 case USB_SPEED_FULL:
2005 return FS_BLOCK;
2006 case USB_SPEED_HIGH:
2007 return HS_BLOCK;
2008 case USB_SPEED_SUPER:
2009 return SS_BLOCK;
2010 case USB_SPEED_UNKNOWN:
2011 case USB_SPEED_WIRELESS:
2012 default:
2013 /* Should never happen */
2014 return 1;
2015 }
2016}
2017
2018static unsigned int
2019xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2020{
2021 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2022 return LS_OVERHEAD;
2023 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2024 return FS_OVERHEAD;
2025 return HS_OVERHEAD;
2026}
2027
2028/* If we are changing a LS/FS device under a HS hub,
2029 * make sure (if we are activating a new TT) that the HS bus has enough
2030 * bandwidth for this new TT.
2031 */
2032static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2033 struct xhci_virt_device *virt_dev,
2034 int old_active_eps)
2035{
2036 struct xhci_interval_bw_table *bw_table;
2037 struct xhci_tt_bw_info *tt_info;
2038
2039 /* Find the bandwidth table for the root port this TT is attached to. */
2040 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2041 tt_info = virt_dev->tt_info;
2042 /* If this TT already had active endpoints, the bandwidth for this TT
2043 * has already been added. Removing all periodic endpoints (and thus
2044 * making the TT inactive) will only decrease the bandwidth used.
2045 */
2046 if (old_active_eps)
2047 return 0;
2048 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2049 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2050 return -ENOMEM;
2051 return 0;
2052 }
2053 /* Not sure why we would have no new active endpoints...
2054 *
2055 * Maybe because of an Evaluate Context change for a hub update or a
2056 * control endpoint 0 max packet size change?
2057 * FIXME: skip the bandwidth calculation in that case.
2058 */
2059 return 0;
2060}
2061
2062static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2063 struct xhci_virt_device *virt_dev)
2064{
2065 unsigned int bw_reserved;
2066
2067 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2068 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2069 return -ENOMEM;
2070
2071 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2072 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2073 return -ENOMEM;
2074
2075 return 0;
2076}
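/*
 * Reading the check above, assuming SS_BW_RESERVED is a percentage
 * (e.g. 10): bw_reserved = DIV_ROUND_UP(10 * SS_BW_LIMIT_IN, 100) holds
 * back 10% of the IN budget, so a configuration is rejected once its
 * periodic IN endpoints would consume more than 90% of SS_BW_LIMIT_IN.
 * OUT is checked separately because SuperSpeed links are dual-simplex,
 * with independent IN and OUT bandwidth.
 */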
2077
2078/*
2079 * This algorithm is a very conservative estimate of the worst-case scheduling
2080 * scenario for any one interval. The hardware dynamically schedules the
2081 * packets, so we can't tell which microframe could be the limiting factor in
2082 * the bandwidth scheduling. This only takes into account periodic endpoints.
2083 *
2084 * Obviously, we can't solve an NP complete problem to find the minimum worst
2085 * case scenario. Instead, we come up with an estimate that is no less than
2086 * the worst case bandwidth used for any one microframe, but may be an
2087 * over-estimate.
2088 *
2089 * We walk the requirements for each endpoint by interval, starting with the
2090 * smallest interval, and place packets in the schedule where there is only one
2091 * possible way to schedule packets for that interval. In order to simplify
2092 * this algorithm, we record the largest max packet size for each interval, and
2093 * assume all packets will be that size.
2094 *
2095 * For interval 0, we obviously must schedule all packets for each interval.
2096 * The bandwidth for interval 0 is just the amount of data to be transmitted
2097 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2098 * the number of packets).
2099 *
2100 * For interval 1, we have two possible microframes to schedule those packets
2101 * in. For this algorithm, if we can schedule the same number of packets for
2102 * each possible scheduling opportunity (each microframe), we will do so. The
2103 * remaining number of packets will be saved to be transmitted in the gaps in
2104 * the next interval's scheduling sequence.
2105 *
2106 * As we move those remaining packets to be scheduled with interval 2 packets,
2107 * we have to double the number of remaining packets to transmit. This is
2108 * because the intervals are actually powers of 2, and we would be transmitting
2109 * the previous interval's packets twice in this interval. We also have to be
2110 * sure that when we look at the largest max packet size for this interval, we
2111 * also look at the largest max packet size for the remaining packets and take
2112 * the greater of the two.
2113 *
2114 * The algorithm continues to evenly distribute packets in each scheduling
2115 * opportunity, and push the remaining packets out, until we get to the last
2116 * interval. Then those packets and their associated overhead are just added
2117 * to the bandwidth used.
2118 */
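/*
 * Illustrative example (numbers chosen for clarity, not taken from real
 * hardware): suppose 9 packets are pending at interval 1, including any
 * doubled carry-over. With (1 << 2) = 4 scheduling opportunities, 9 >> 2 = 2
 * packets land in every opportunity, adding 2 * (overhead + packet_size)
 * blocks to the per-microframe worst case, and 9 % 4 = 1 packet remains.
 * That remainder is doubled to 2 when it is carried into interval 2, where
 * it competes with interval 2's own packets for 8 opportunities, and so on
 * until interval 15, after which any leftover is charged to every microframe.
 */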
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);
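	/* All the math below is done in these block units: packet sizes and
	 * ESIT payloads are divided by block_size (rounding up) before they
	 * are weighed against the FS/HS block budgets.
	 */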

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for TT slot %u port %u",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Recalculating BW for rootport %u",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
		 */
		if (list_empty(&bw_table->interval_bw[i].endpoints))
			largest_mps = 0;
		else {
			struct xhci_virt_ep *virt_ep;
			struct list_head *ep_entry;

			ep_entry = bw_table->interval_bw[i].endpoints.next;
			virt_ep = list_entry(ep_entry,
					struct xhci_virt_ep, bw_endpoint_list);
			/* Convert to blocks, rounding up */
			largest_mps = DIV_ROUND_UP(
					virt_ep->bw_info.max_packet_size,
					block_size);
		}
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		/* Use the larger overhead of this or the previous interval. */
		interval_overhead = xhci_get_largest_overhead(
				&bw_table->interval_bw[i]);
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		/* How many packets can we evenly distribute across
		 * (1 << (i + 1)) possible scheduling opportunities?
		 */
		packets_transmitted = packets_remaining >> (i + 1);

		/* Add in the bandwidth used for those scheduled packets */
		bw_added = packets_transmitted * (overhead + packet_size);

		/* How many packets do we have remaining to transmit? */
		packets_remaining = packets_remaining % (1 << (i + 1));

		/* What largest max packet size should those packets have? */
		/* If we've transmitted all packets, don't carry over the
		 * largest packet size.
		 */
		if (packets_remaining == 0) {
			packet_size = 0;
			overhead = 0;
		} else if (packets_transmitted > 0) {
			/* Otherwise if we do have remaining packets, and we've
			 * scheduled some packets in this interval, take the
			 * largest max packet size from endpoints with this
			 * interval.
			 */
			packet_size = largest_mps;
			overhead = interval_overhead;
		}
		/* Otherwise carry over packet_size and overhead from the last
		 * time we had a remainder.
		 */
		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. "
					"Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);
			return -ENOMEM;
		}
	}
	/*
	 * Ok, we know we have some packets left over after even-handedly
	 * scheduling interval 15. We don't know which microframes they will
	 * fit into, so we over-schedule and say they will be scheduled every
	 * microframe.
	 */
	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
		unsigned int port_index = virt_dev->real_port - 1;

		/* OK, we're manipulating a HS device attached to a
		 * root port bandwidth domain. Include the number of active TTs
		 * in the bandwidth used.
		 */
		bw_used += TT_HS_OVERHEAD *
			xhci->rh_bw[port_index].num_active_tts;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		"Final bandwidth: %u, Limit: %u, Reserved: %u, "
		"Available: %u percent",
		bw_used, max_bandwidth, bw_reserved,
		(max_bandwidth - bw_used - bw_reserved) * 100 /
		max_bandwidth);

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}

static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
		ep_type != ISOC_IN_EP &&
		ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}

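/* Estimate the bandwidth (in blocks) a SuperSpeed periodic endpoint consumes.
 * An interval of 0 means every microframe carries a service opportunity, so
 * the full burst cost is charged; for larger intervals the cost is spread
 * (rounding up) over the 2^interval microframes between service opportunities.
 */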
static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
			(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);

}

void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
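	/* A LS/FS ep_interval is at least 3 here (2^3 microframes = 1 frame),
	 * so subtracting 3 re-expresses it as a power-of-two frame count,
	 * e.g. ep_interval 3 becomes normalized_interval 0 (every frame).
	 */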
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}
	if (tt_info)
		tt_info->active_eps -= 1;
	list_del_init(&virt_ep->bw_endpoint_list);
}

static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	struct xhci_virt_ep *smaller_ep;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets += ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
		 */
		return;
	}

	if (tt_info)
		tt_info->active_eps += 1;
	/* Insert the endpoint into the list, largest max packet size first. */
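	/* Keeping this order means xhci_check_bw_table() only has to look at
	 * the head of the list to find the largest max packet size for an
	 * interval.
	 */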
	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
			bw_endpoint_list) {
		if (ep_bw->max_packet_size >=
				smaller_ep->bw_info.max_packet_size) {
			/* Add the new ep before the smaller endpoint */
			list_add_tail(&virt_ep->bw_endpoint_list,
					&smaller_ep->bw_endpoint_list);
			return;
		}
	}
	/* Add the new endpoint at the end of the list. */
	list_add_tail(&virt_ep->bw_endpoint_list,
			&interval_bw->endpoints);
}

void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_root_port_bw_info *rh_bw_info;
	if (!virt_dev->tt_info)
		return;

	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
		rh_bw_info->num_active_tts += 1;
		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
		rh_bw_info->num_active_tts -= 1;
		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
	}
}

static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_bw_info ep_bw_info[31];
	int i;
	struct xhci_input_control_ctx *ctrl_ctx;
	int old_active_eps = 0;

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Make a copy of the BW info in case we need to revert this */
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
		/* Drop the endpoint from the interval table if the endpoint is
		 * being dropped or changed.
		 */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	/* Overwrite the information stored in the endpoints' bw_info */
	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
	for (i = 0; i < 31; i++) {
		/* Add any changed or added endpoints to the interval table */
		if (EP_IS_ADDED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
		/* Ok, this fits in the bandwidth we have.
		 * Update the number of active TTs.
		 */
		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
		return 0;
	}

	/* We don't have enough bandwidth for this, revert the stored info. */
	for (i = 0; i < 31; i++) {
		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
			continue;

		/* Drop the new copies of any added or changed endpoints from
		 * the interval table.
		 */
		if (EP_IS_ADDED(ctrl_ctx, i)) {
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		}
		/* Revert the endpoint back to its old information */
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		/* Add any changed or dropped endpoints back into the table */
		if (EP_IS_DROPPED(ctrl_ctx, i))
			xhci_add_ep_to_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					virt_dev->udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
	}
	return -ENOMEM;
}


/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	if (command)
		in_ctx = command->in_ctx;
	else
		in_ctx = virt_dev->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
			xhci_reserve_host_resources(xhci, ctrl_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, "
				"active endpoint contexts = %u\n",
				xhci->num_active_eps);
		return -ENOMEM;
	}
	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");
		return -ENOMEM;
	}

	if (command) {
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);
		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
			xhci_free_host_resources(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"FIXME allocate a new ring segment");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* cancel the configure endpoint command */
		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	if (!ctx_change)
		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	else
		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* If the command failed, remove the reserved resources.
		 * Otherwise, clean up the estimate to include dropped eps.
		 */
		if (ret)
			xhci_free_host_resources(xhci, ctrl_ctx);
		else
			xhci_finish_resource_reservation(xhci, ctrl_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
		struct xhci_virt_device *vdev, int i)
{
	struct xhci_virt_ep *ep = &vdev->eps[i];

	if (ep->ep_state & EP_HAS_STREAMS) {
		xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
				xhci_get_endpoint_address(i));
		xhci_free_stream_info(xhci, ep->stream_info);
		ep->stream_info = NULL;
		ep->ep_state &= ~EP_HAS_STREAMS;
	}
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint(). If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface. Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
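	/* That is: add the slot context (A0 = 1), leave EP0 alone (A1 = 0),
	 * and never drop the slot or default control endpoint (D0 = D1 = 0).
	 */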
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));

	/* Don't issue the command if there are no endpoints to update. */
	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
			ctrl_ctx->drop_flags == 0)
		return 0;

	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Caller should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
				!(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}

static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		u32 add_flags, u32 drop_flags)
{
	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}

static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	in_ctx = xhci->devs[slot_id]->in_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return;
	}

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ctrl_ctx,
			added_ctxs, added_ctxs);
}

void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Cleaning up stalled endpoint ring");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Queueing new dequeue state");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Setting up input context for "
				"configure endpoint command");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}

/* Deal with stalled endpoints. The core should have sent the control message
 * to clear the halt condition. However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()).
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Endpoint 0x%x not halted, refusing to reset.",
			ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
				"Control endpoint stall already handled.");
		return;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
			"Queueing reset endpoint command");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command. Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
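	/* (e.g. a request for 5 streams needs an 8-entry stream context array) */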
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports. Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}

/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
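	/* For example, a driver asking for 4 streams causes 5 to be requested
	 * from the hardware here; the "num_streams - 1" returned on success
	 * hides reserved stream 0 from the driver again.
	 */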
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	/* MaxPSASize value 0 (2 streams) means streams are not supported */
	if (HCC_MAX_PSA(xhci->hcc_params) < 4) {
		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
		return -ENOSYS;
	}

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams. While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
			udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}

/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint. We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}

	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command. The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint. The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration. If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish. Remove all structures
 * associated with the endpoints that were disabled. Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway. Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci_find_next_enqueue(xhci->cmd_ring);

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error. May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
					xhci_get_endpoint_address(i));
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
			last_freed_endpoint = i;
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled. Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow the controller to runtime
	 * suspend if no devices remain.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_put_noidle(hcd->self.controller);
#endif

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = readl(&xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot. XXX Can free sleep?
	 */
}

/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding 1 ep ctx, %u now active.",
			xhci->num_active_eps);
	return 0;
}


/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed. Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;
	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* cancel the enable slot request */
		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	udev->slot_id = xhci->slot_id;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_get_noresume(hcd->self.controller);
#endif

	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;

disable_slot:
	/* Disable slot, if we can do it without mem alloc */
	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
		xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

/*
 * Issue an Address Device command and optionally send a corresponding
 * SetAddress request to the device.
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 */
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
			     enum xhci_setup_dev setup)
{
	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	union xhci_trb *cmd_trb;

	if (!udev->slot_id) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Bad Slot ID %d", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture test with an NEC controller,
		 * a zero-dereference was observed once due to virt_dev = 0.
		 * Print useful debug rather than crash if it is observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);

	spin_lock_irqsave(&xhci->lock, flags);
	cmd_trb = xhci_find_next_enqueue(xhci->cmd_ring);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id, setup);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"FIXME: allocate a command ring segment");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			XHCI_CMD_DEFAULT_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout."
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for setup %s command\n",
				timeleft == 0 ? "Timeout" : "Signal", act);
		/* cancel the address device command */
		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
		if (ret < 0)
			return ret;
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
				act, udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev,
				"ERROR: Incompatible device for setup %s command\n", act);
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Successful setup %s command", act);
		break;
	default:
		xhci_err(xhci,
			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
			 act, virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Op regs DCBAA ptr = %#016llx", temp_64);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		"Slot ID %d dcbaa entry @%p = %#016llx",
		udev->slot_id,
		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
		(unsigned long long)
		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Output Context DMA address = %#08llx",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Internal device address = %d",
			le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);

	return 0;
}

int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}

int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
}
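
/*
 * Note: both wrappers above funnel into xhci_setup_device(). Per the xHCI
 * spec, the "address" variant issues the Address Device command with BSR
 * (Block Set Address Request) clear, so the xHC also sends SET_ADDRESS on
 * the wire; the "enable" variant sets BSR, which initializes the output
 * context and moves the slot to the Default state without addressing the
 * device.
 */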

/*
 * Translate the port index into the real index in the HW port status
 * registers: calculate the offset between the port's PORTSC register and
 * the port status base, then divide by the number of registers per port
 * to get the real index. Raw port numbers are 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
	__le32 __iomem *addr;
	int raw_port;

	if (hcd->speed != HCD_USB3)
		addr = xhci->usb2_ports[port1 - 1];
	else
		addr = xhci->usb3_ports[port1 - 1];

	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
	return raw_port;
}
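
/*
 * Worked example (assuming NUM_PORT_REGS is 4, i.e. one register set of
 * PORTSC/PORTPMSC/PORTLI/PORTHLPMC per port): if this port's PORTSC lies
 * eight 32-bit registers past the port status base, the raw port number
 * is 8 / 4 + 1 = 3.
 */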

/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	virt_dev = xhci->devs[udev->slot_id];
	command = xhci->lpm_command;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);

	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Set up evaluate context for LPM MEL change.");
	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, command->in_ctx, 0);

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);
	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

#ifdef CONFIG_PM_RUNTIME

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}

	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}
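
/*
 * Worked example of the BESL path: a host reporting a U2 latency of 300 us
 * maps to besl_host = 3 (the first table entry >= 300); a device
 * advertising a valid baseline BESL of 4 then gives besl = 3 + 4 = 7,
 * comfortably under the cap of 15.
 */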

/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
	u32 field;
	int l1;
	int besld = 0;
	int hirdm = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
	l1 = udev->l1_params.timeout / 256;

	/* device has preferred BESLD */
	if (field & USB_BESL_DEEP_VALID) {
		besld = USB_GET_BESL_DEEP(field);
		hirdm = 1;
	}

	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
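
/*
 * Example: an L1 timeout of 512 us encodes as l1 = 512 / 256 = 2; a device
 * whose BOS descriptor carries a valid deep BESL of 5 yields besld = 5
 * with hirdm = 1 (interpret the field as BESL rather than HIRD).
 */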

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem **port_array;
	__le32 __iomem *pm_addr, *hlpm_addr;
	u32 pm_val, hlpm_val, field;
	unsigned int port_num;
	unsigned long flags;
	int hird, exit_latency;
	int ret;

	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + PORTPMSC;
	pm_val = readl(pm_addr);
	hlpm_addr = port_array[port_num] + PORTHLPMC;
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num);

	if (enable) {
		/* Host supports BESL timeout instead of HIRD */
		if (udev->usb2_hw_lpm_besl_capable) {
			/* If the device doesn't have a preferred BESL value,
			 * use a default that works on mixed HIRD and BESL
			 * systems. See the XHCI_DEFAULT_BESL definition in
			 * xhci.h.
			 */
			if ((field & USB_BESL_SUPPORT) &&
			    (field & USB_BESL_BASELINE_VALID))
				hird = USB_GET_BESL_BASELINE(field);
			else
				hird = udev->l1_params.besl;

			exit_latency = xhci_besl_encoding[hird];
			spin_unlock_irqrestore(&xhci->lock, flags);

			/* The USB 3.0 code dedicates one input context,
			 * xhci->lpm_command->in_ctx, to link power management
			 * evaluate context commands. It is protected by
			 * hcd->bandwidth_mutex and is shared by all devices.
			 * We need to set the max exit latency for USB 2 BESL
			 * LPM as well, so use the same mutex and
			 * xhci_change_max_exit_latency().
			 */
			mutex_lock(hcd->bandwidth_mutex);
			ret = xhci_change_max_exit_latency(xhci, udev,
							   exit_latency);
			mutex_unlock(hcd->bandwidth_mutex);

			if (ret < 0)
				return ret;
			spin_lock_irqsave(&xhci->lock, flags);

			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
		} else {
			hird = xhci_calculate_hird_besl(xhci, udev);
		}

		pm_val &= ~PORT_HIRD_MASK;
		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
	} else {
		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			mutex_lock(hcd->bandwidth_mutex);
			xhci_change_max_exit_latency(xhci, udev, 0);
			mutex_unlock(hcd->bandwidth_mutex);
			return 0;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

/* Check whether a USB2 port supports a given extended capability protocol.
 * Only the extended protocol capability values of USB2 ports are cached.
 * Return 1 if the capability is supported.
 */
static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
		unsigned capability)
{
	u32 port_offset, port_count;
	int i;

	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			/* port offsets start at 1 */
			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
			if (port >= port_offset &&
					port < port_offset + port_count)
				return 1;
		}
	}
	return 0;
}
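
/*
 * Example: an extended capability declaring a compatible port offset of 1
 * and a port count of 4 covers zero-based ports 0-3, so a query for port 2
 * returns 1 and a query for port 4 returns 0.
 */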

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int portnum = udev->portnum - 1;

	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return 0;

	/* For now, we only support LPM for non-hub devices connected
	 * directly to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return 0;

	if (xhci->hw_lpm_support == 1 &&
			xhci_check_usb2_port_capability(
				xhci, portnum, XHCI_HLC)) {
		udev->usb2_hw_lpm_capable = 1;
		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
		udev->l1_params.besl = XHCI_DEFAULT_BESL;
		if (xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_BLC))
			udev->usb2_hw_lpm_besl_capable = 1;
	}

	return 0;
}

#else

int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			       struct usb_device *udev, int enable)
{
	return 0;
}

int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

#endif /* CONFIG_PM_RUNTIME */

/*---------------------- USB 3.0 Link PM functions ------------------------*/

#ifdef CONFIG_PM
/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
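
/*
 * Example: bInterval = 4 gives 2^3 * 125 us = 1 ms, i.e. 1000000 ns.
 */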

static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}
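
/*
 * Returning USB3_LPM_DEVICE_INITIATED means the hub never starts the U1/U2
 * transition itself, but the device may still initiate it, since its SEL
 * and PEL both fit within what the USB 3.0 spec allows.
 */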
/* Returns the hub-encoded U1 timeout value.
 * The U1 timeout should be the maximum of the following values:
 * - For control endpoints, U1 system exit latency (SEL) * 3
 * - For bulk endpoints, U1 SEL * 5
 * - For interrupt endpoints:
 *   - Notification EPs, U1 SEL * 3
 *   - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	/* The U1 timeout is encoded in 1us intervals. */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
	/* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns++;

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
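
/*
 * Example: a bulk endpoint on a device with a U1 SEL of 400 ns needs
 * timeout_ns = 5 * 400 = 2000 ns, which encodes (in 1 us units) as
 * DIV_ROUND_UP(2000, 1000) = 2.
 */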

/* Returns the hub-encoded U2 timeout value.
 * The U2 timeout should be the maximum of:
 * - 10 ms (to avoid the bandwidth impact on the scheduler)
 * - largest bInterval of any active periodic endpoint (to avoid going
 *   into lower power link states between intervals).
 * - the U2 Exit Latency of the device
 */
static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu (in 256 us units)\n",
			timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
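
/*
 * Example: with no periodic endpoint longer than the 10 ms floor and a
 * small bU2DevExitLat, timeout_ns stays at 10000000 ns, which encodes (in
 * 256 us units) as DIV_ROUND_UP(10000000, 256000) = 40, roughly 10.24 ms.
 */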

static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1) {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u1_timeout(udev, desc);
	} else {
		if (xhci->quirks & XHCI_INTEL_HOST)
			return xhci_calculate_intel_u2_timeout(udev, desc);
	}

	return USB3_LPM_DISABLED;
}

static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}

static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
	}
	return 0;
}

static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}
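
/*
 * Example: a device plugged straight into the root hub walks zero parents
 * (num_hubs = 0) and a device below one external hub walks one, so both
 * may use U1; anything behind a second-tier hub or deeper is rejected.
 */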

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	return -EINVAL;
}

/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;
	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
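
/*
 * Example: enabling U1 with a U1 MEL of 3000 ns while U2 is already
 * enabled with a U2 MEL of 2000 ns gives u1_mel_us = 3 and u2_mel_us = 2,
 * so the slot's max exit latency becomes 3 us.
 */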

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return 0;
}
#else /* CONFIG_PM */

int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
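		/* Example: a think time of 666 ns (8 FS bit times)
		 * encodes as 0, 1332 ns as 1, and so on.
		 */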
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
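	/* MFINDEX counts 125 us microframes; shifting right by three
	 * converts to 1 ms frame numbers (eight microframes per frame).
	 */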
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	struct device *dev = hcd->self.controller;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support to build packet from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* XHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT
		 * (rate matching hub), as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		return 0;
	}

	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
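	/* The upper 16 bits of hc_capbase hold HCIVERSION, so read it first
	 * to extract the interface version, then load the real HCCPARAMS
	 * register.
	 */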
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	xhci->quirks = quirks;

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec may generate a
	 * spurious success event after a short transfer. This quirk makes
	 * the driver ignore such spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");
	return 0;
error:
	kfree(xhci);
	return retval;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		pr_debug("Problem registering PCI driver.\n");
		return retval;
	}
	retval = xhci_register_plat();
	if (retval < 0) {
		pr_debug("Problem registering platform driver.\n");
		goto unreg_pci;
	}
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
unreg_pci:
	xhci_unregister_pci();
	return retval;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
	xhci_unregister_plat();
}
module_exit(xhci_hcd_cleanup);