// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

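/*
 * Walk the ring's segment list and return true if @td's start segment is on
 * @ring; used to detect TDs whose ring was freed or reallocated underneath
 * them (see xhci_urb_dequeue()).
 */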
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

/*
 * xhci_handshake_check_state - same as xhci_handshake but takes an additional
 * exit_state parameter, and bails out with an error immediately when xhc_state
 * has exit_state flag set.
 */
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
			       u32 mask, u32 done, int usec, unsigned int exit_state)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX ||
					xhci->xhc_state & exit_state,
					1, usec);

	if (result == U32_MAX || xhci->xhc_state & exit_state)
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
		       temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* clear state flags, including dying, halted or removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms
	 * after setting the CMD_RESET bit, and before accessing any
	 * HC registers. This allows the HC to complete the
	 * reset operation and be ready for HC register access.
	 * Without this delay, the subsequent HC register access
	 * may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
					 CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

static int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

static int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);

	return 0;
}

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 * which sometimes makes ports behind that hardware enter compliance mode. The
 * quirk creates a timer that polls the link state of each host controller
 * port every 2 seconds and recovers a port by issuing a Warm Reset if
 * Compliance Mode is detected; otherwise the port becomes "dead" (no device
 * connections or disconnections will be detected anymore). Because no status
 * event is generated when entering compliance mode (per the xHCI spec), this
 * quirk is needed on systems that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
		msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			       "xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupters[0];
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before the host is running with a lock.
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];
	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Set the interrupt modulation register");
	temp = readl(&ir->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &ir->ir_set->irq_control);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
						TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
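/*
 * Save the operational and interrupter registers that may be lost while the
 * host is suspended; xhci_restore_registers() writes them back on resume.
 */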
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	/* save both primary and all secondary interrupters */
	/* FIXME: should we lock to prevent a race with secondary interrupter removal? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		ir->s3_erst_size = readl(&ir->ir_set->erst_size);
		ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
		ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
		ir->s3_irq_control = readl(&ir->ir_set->irq_control);
	}
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);

	/* FIXME should we lock to protect against freeing of interrupters */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		writel(ir->s3_erst_size, &ir->ir_set->erst_size);
		xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
		xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
		writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
		writel(ir->s3_irq_control, &ir->ir_set->irq_control);
	}
}

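/*
 * Re-program the command ring dequeue pointer register from the driver's
 * software state, preserving the reserved bits and the current cycle state.
 */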
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Setting command ring address to 0x%llx",
		       (long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
		       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to U0).
 * Internal wake causes immediate xHCI wake after suspend. The PORT_CSC write
 * done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wakeup is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

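/*
 * Return true if a port event is pending: either the event interrupt bit is
 * set, or a PORTSC register still shows a change bit or resume link state
 * whose event has not yet reached the event ring.
 */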
static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		del_timer_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming that port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
			   STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the
		 * SSS bit of USBSTS; the driver polls for BIT(8) to
		 * clear, which never happens, and times out assuming
		 * the controller is not responding. To work around
		 * this, check that the SRE and HCE bits are not set
		 * (as per xhci Section 5.4.2) and bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
		     ((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
{
	bool hibernated = (msg.event == PM_EVENT_RESTORE);
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int retval = 0;
	bool comp_timer_running = false;
	bool pending_portevent = false;
	bool suspended_usb3_devs = false;
	bool reinit_xhc = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */

	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
		reinit_xhc = true;

	if (!reinit_xhc) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms. The xHCI specification
		 * doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}

	temp = readl(&xhci->op_regs->status);

	/* re-initialize the HC on Restore Error, or Host Controller Error */
	if ((temp & (STS_SRE | STS_HCE)) &&
	    !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
		reinit_xhc = true;
		if (!xhci->broken_suspend)
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
	}

	if (reinit_xhc) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		xhci_disable_interrupter(xhci->interrupters[0]);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			 readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}

		hcd->state = HC_STATE_SUSPENDED;
		if (xhci->shared_hcd)
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

 done:
	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed; give it that chance if
		 * there are suspended USB 3 devices.
		 */
		if (xhci->usb3_rhub.bus_state.suspended_ports ||
		    xhci->usb3_rhub.bus_state.bus_suspended)
			suspended_usb3_devs = true;

		pending_portevent = xhci_pending_portevent(xhci);

		if (suspended_usb3_devs && !pending_portevent &&
		    msg.event == PM_EVENT_AUTO_RESUME) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			if (xhci->shared_hcd)
				usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * must always be re-initialized after a system resume: ports may
	 * suffer the Compliance Mode issue again, regardless of whether they
	 * entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

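/*
 * Bounce an SG URB into a single contiguous DMA-mapped buffer, for hosts with
 * XHCI_SG_TRB_CACHE_SIZE_QUIRK; see xhci_urb_temp_buffer_required() below for
 * when this is needed.
 */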
static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)	/* atomic allocation can fail; don't map a NULL buffer */
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

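/*
 * Heuristic for the TRB cache quirk (an interpretation of the sliding-window
 * check below): returns true if a max packet's worth of data could end up
 * spread across more TRBs than the host's TRB cache holds, in which case the
 * URB must be bounced into one contiguous buffer.
 */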
static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

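/*
 * Undo xhci_map_temp_buffer(): unmap the bounce buffer, copy any IN data back
 * into the URB's scatterlist, and free the temporary buffer.
 */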
static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT):
 * we'll copy the actual data into the TRB address register. This is limited to
 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT, only one Transfer TRB per TD is allowed.
 */
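/*
 * For instance (illustrative), an 8-byte control OUT data stage on an
 * endpoint with wMaxPacketSize >= 8 qualifies for IDT: the payload is placed
 * directly in the TRB and no DMA mapping is needed for it.
 */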
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}
1353
1354/**
1355 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
1356 * HCDs. Find the index for an endpoint given its descriptor. Use the return
1357 * value to right shift 1 for the bitmask.
1358 *
1359 * Index = (epnum * 2) + direction - 1,
1360 * where direction = 0 for OUT, 1 for IN.
1361 * For control endpoints, the IN index is used (OUT index is unused), so
1362 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
1363 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
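/*
 * Example: ep 1 IN (bEndpointAddress 0x81) gives index (1 * 2) + 1 - 1 = 2,
 * ep 1 OUT (0x01) gives (1 * 2) + 0 - 1 = 1, and ep 0 gives (0 * 2) = 0.
 */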

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}
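/*
 * Example: ep_index 2 gives DIV_ROUND_UP(2, 2) = 1 with an even index, so
 * USB_DIR_IN | 1 = 0x81; ep_index 1 gives USB_DIR_OUT | 1 = 0x01.
 */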

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}
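/*
 * Example: ep 1 IN has endpoint index 2, so its flag is 1 << (2 + 1) = 0x8,
 * matching the added_ctxs = 0b1000 example below.
 */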

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
			   struct usb_host_endpoint *ep, int check_ep,
			   bool check_virt_dev, const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
				 func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with non-matching udev and virt_dev\n",
				 func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);

	if (hw_max_packet_size == max_packet_size)
		return 0;

	switch (max_packet_size) {
	case 8: case 16: case 32: case 64: case 9:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in usb_device = %d",
			       max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in xHCI HW = %d",
			       hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Issuing evaluate context command.");

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = vdev->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				  __func__);
			ret = -ENOMEM;
			break;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);	/* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, vdev->udev, command,
					      true, false);
		/* Clean up the input context for later use by bandwidth functions */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
		break;
	default:
		dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
			max_packet_size);
		return -EINVAL;
	}

	kfree(command->completion);
	kfree(command);

	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

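	/*
	 * One TD per URB in general: isoc URBs need one TD per packet, and a
	 * max-packet-multiple bulk OUT URB with URB_ZERO_PACKET needs a second
	 * TD for the trailing zero-length packet.
	 */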
	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
		 urb->transfer_buffer_length > 0 &&
		 urb->transfer_flags & URB_ZERO_PACKET &&
		 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_check_args(hcd, urb->dev, urb->ep,
			      true, true, __func__);
	if (ret <= 0) {
		ret = ret ? ret : -EINVAL;
		goto free_priv;
	}

	slot_id = urb->dev->slot_id;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		ret = -ESHUTDOWN;
		goto free_priv;
	}

	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
		ret = -ENODEV;
		goto free_priv;
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}

	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
						 slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are three cases:
 *
 * 1) If the HC is in the middle of processing the URB to be canceled, we
 *    simply move the ring's dequeue pointer past those TRBs using the Set
 *    Transfer Ring Dequeue Pointer command. This will be the common case,
 *    when drivers timeout on the last submitted URB and attempt to cancel.
 *
 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *    series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *    HC will need to invalidate any TRBs it has cached after the stop
 *    endpoint command, as noted in the xHCI 0.95 errata.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	/*
	 * check ring is not re-allocated since URB was enqueued. If it is, then
	 * make sure none of the ring related pointers in this URB private data
	 * are touched, such as td_list, otherwise we overwrite freed data
	 */
	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done;
		     i < urb_priv->num_tds;
		     i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	i = urb_priv->num_tds_done;
	if (i < urb_priv->num_tds)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
			       urb, urb->dev->devpath,
			       urb->ep->desc.bEndpointAddress,
			       (unsigned long long) xhci_trb_virt_to_dma(
				       urb_priv->td[i].start_seg,
				       urb_priv->td[i].first_trb));

	for (; i < urb_priv->num_tds; i++) {
		td = &urb_priv->td[i];
		/* TD can already be on cancelled list if ep halted on it */
		if (list_empty(&td->cancelled_td_list)) {
			td->cancel_status = TD_DIRTY;
			list_add_tail(&td->cancelled_td_list,
				      &ep->cancelled_td_list);
		}
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->ep_state |= EP_STOP_CMD_PENDING;
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;

err_giveback:
	if (urb_priv)
		xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		       struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
			 __func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
			  __func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		/* Do not warn when called after a usb_device_reset */
		if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
			xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				  __func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
		 (unsigned int) ep->desc.bEndpointAddress,
		 udev->slot_id,
		 (unsigned int) new_drop_flags,
		 (unsigned int) new_add_flags);
	return 0;
}
EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
1866
1867/* Add an endpoint to a new possible bandwidth configuration for this device.
1868 * Only one call to this function is allowed per endpoint before
1869 * check_bandwidth() or reset_bandwidth() must be called.
1870 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1871 * add the endpoint to the schedule with possibly new parameters denoted by a
1872 * different endpoint descriptor in usb_host_endpoint.
1873 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1874 * not allowed.
1875 *
1876 * The USB core will not allow URBs to be queued to an endpoint until the
1877 * configuration or alt setting is installed in the device, so there's no need
1878 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1879 */
1880int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1881 struct usb_host_endpoint *ep)
1882{
1883 struct xhci_hcd *xhci;
1884 struct xhci_container_ctx *in_ctx;
1885 unsigned int ep_index;
1886 struct xhci_input_control_ctx *ctrl_ctx;
1887 struct xhci_ep_ctx *ep_ctx;
1888 u32 added_ctxs;
1889 u32 new_add_flags, new_drop_flags;
1890 struct xhci_virt_device *virt_dev;
1891 int ret = 0;
1892
1893 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1894 if (ret <= 0) {
1895 /* So we won't queue a reset ep command for a root hub */
1896 ep->hcpriv = NULL;
1897 return ret;
1898 }
1899 xhci = hcd_to_xhci(hcd);
1900 if (xhci->xhc_state & XHCI_STATE_DYING)
1901 return -ENODEV;
1902
1903 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1904 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1905 /* FIXME when we have to issue an evaluate endpoint command to
1906 * deal with ep0 max packet size changing once we get the
1907 * descriptors
1908 */
1909 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1910 __func__, added_ctxs);
1911 return 0;
1912 }
1913
1914 virt_dev = xhci->devs[udev->slot_id];
1915 in_ctx = virt_dev->in_ctx;
1916 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1917 if (!ctrl_ctx) {
1918 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1919 __func__);
1920 return 0;
1921 }
1922
1923 ep_index = xhci_get_endpoint_index(&ep->desc);
1924 /* If this endpoint is already in use, and the upper layers are trying
1925 * to add it again without dropping it, reject the addition.
1926 */
1927 if (virt_dev->eps[ep_index].ring &&
1928 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1929 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1930 "without dropping it.\n",
1931 (unsigned int) ep->desc.bEndpointAddress);
1932 return -EINVAL;
1933 }
1934
1935 /* If the HCD has already noted the endpoint is enabled,
1936 * ignore this request.
1937 */
1938 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1939 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1940 __func__, ep);
1941 return 0;
1942 }
1943
1944 /*
1945 * Configuration and alternate setting changes must be done in
1946	 * process context, not interrupt context (or so the documentation
1947	 * for usb_set_interface() and usb_set_configuration() claims).
1948 */
1949 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1950 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1951 __func__, ep->desc.bEndpointAddress);
1952 return -ENOMEM;
1953 }
1954
1955 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1956 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1957
1958 /* If xhci_endpoint_disable() was called for this endpoint, but the
1959 * xHC hasn't been notified yet through the check_bandwidth() call,
1960 * this re-adds a new state for the endpoint from the new endpoint
1961 * descriptors. We must drop and re-add this endpoint, so we leave the
1962 * drop flags alone.
1963 */
1964 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1965
1966 /* Store the usb_device pointer for later use */
1967 ep->hcpriv = udev;
1968
1969 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1970 trace_xhci_add_endpoint(ep_ctx);
1971
1972 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1973 (unsigned int) ep->desc.bEndpointAddress,
1974 udev->slot_id,
1975 (unsigned int) new_drop_flags,
1976 (unsigned int) new_add_flags);
1977 return 0;
1978}
1979EXPORT_SYMBOL_GPL(xhci_add_endpoint);
1980
1981static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1982{
1983 struct xhci_input_control_ctx *ctrl_ctx;
1984 struct xhci_ep_ctx *ep_ctx;
1985 struct xhci_slot_ctx *slot_ctx;
1986 int i;
1987
1988 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1989 if (!ctrl_ctx) {
1990 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1991 __func__);
1992 return;
1993 }
1994
1995 /* When a device's add flag and drop flag are zero, any subsequent
1996 * configure endpoint command will leave that endpoint's state
1997 * untouched. Make sure we don't leave any old state in the input
1998 * endpoint contexts.
1999 */
2000 ctrl_ctx->drop_flags = 0;
2001 ctrl_ctx->add_flags = 0;
2002 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2003 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2004 /* Endpoint 0 is always valid */
2005 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
2006 for (i = 1; i < 31; i++) {
2007 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2008 ep_ctx->ep_info = 0;
2009 ep_ctx->ep_info2 = 0;
2010 ep_ctx->deq = 0;
2011 ep_ctx->tx_info = 0;
2012 }
2013}
2014
2015static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2016 struct usb_device *udev, u32 *cmd_status)
2017{
2018 int ret;
2019
2020 switch (*cmd_status) {
2021 case COMP_COMMAND_ABORTED:
2022 case COMP_COMMAND_RING_STOPPED:
2023 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2024 ret = -ETIME;
2025 break;
2026 case COMP_RESOURCE_ERROR:
2027 dev_warn(&udev->dev,
2028 "Not enough host controller resources for new device state.\n");
2029 ret = -ENOMEM;
2030 /* FIXME: can we allocate more resources for the HC? */
2031 break;
2032 case COMP_BANDWIDTH_ERROR:
2033 case COMP_SECONDARY_BANDWIDTH_ERROR:
2034 dev_warn(&udev->dev,
2035 "Not enough bandwidth for new device state.\n");
2036 ret = -ENOSPC;
2037 /* FIXME: can we go back to the old state? */
2038 break;
2039 case COMP_TRB_ERROR:
2040 /* the HCD set up something wrong */
2041 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2042 "add flag = 1, "
2043 "and endpoint is not disabled.\n");
2044 ret = -EINVAL;
2045 break;
2046 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2047 dev_warn(&udev->dev,
2048 "ERROR: Incompatible device for endpoint configure command.\n");
2049 ret = -ENODEV;
2050 break;
2051 case COMP_SUCCESS:
2052 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2053 "Successful Endpoint Configure command");
2054 ret = 0;
2055 break;
2056 default:
2057 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2058 *cmd_status);
2059 ret = -EINVAL;
2060 break;
2061 }
2062 return ret;
2063}
2064
2065static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2066 struct usb_device *udev, u32 *cmd_status)
2067{
2068 int ret;
2069
2070 switch (*cmd_status) {
2071 case COMP_COMMAND_ABORTED:
2072 case COMP_COMMAND_RING_STOPPED:
2073 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2074 ret = -ETIME;
2075 break;
2076 case COMP_PARAMETER_ERROR:
2077 dev_warn(&udev->dev,
2078 "WARN: xHCI driver setup invalid evaluate context command.\n");
2079 ret = -EINVAL;
2080 break;
2081 case COMP_SLOT_NOT_ENABLED_ERROR:
2082 dev_warn(&udev->dev,
2083 "WARN: slot not enabled for evaluate context command.\n");
2084 ret = -EINVAL;
2085 break;
2086 case COMP_CONTEXT_STATE_ERROR:
2087 dev_warn(&udev->dev,
2088 "WARN: invalid context state for evaluate context command.\n");
2089 ret = -EINVAL;
2090 break;
2091 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2092 dev_warn(&udev->dev,
2093 "ERROR: Incompatible device for evaluate context command.\n");
2094 ret = -ENODEV;
2095 break;
2096 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2097 /* Max Exit Latency too large error */
2098 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2099 ret = -EINVAL;
2100 break;
2101 case COMP_SUCCESS:
2102 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2103 "Successful evaluate context command");
2104 ret = 0;
2105 break;
2106 default:
2107 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2108 *cmd_status);
2109 ret = -EINVAL;
2110 break;
2111 }
2112 return ret;
2113}
2114
2115static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2116 struct xhci_input_control_ctx *ctrl_ctx)
2117{
2118 u32 valid_add_flags;
2119 u32 valid_drop_flags;
2120
2121 /* Ignore the slot flag (bit 0), and the default control endpoint flag
2122 * (bit 1). The default control endpoint is added during the Address
2123 * Device command and is never removed until the slot is disabled.
2124 */
2125 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2126 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2127
2128 /* Use hweight32 to count the number of ones in the add flags, or
2129 * number of endpoints added. Don't count endpoints that are changed
2130 * (both added and dropped).
2131 */
2132 return hweight32(valid_add_flags) -
2133 hweight32(valid_add_flags & valid_drop_flags);
2134}
2135
2136static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2137 struct xhci_input_control_ctx *ctrl_ctx)
2138{
2139 u32 valid_add_flags;
2140 u32 valid_drop_flags;
2141
2142 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2143 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2144
2145 return hweight32(valid_drop_flags) -
2146 hweight32(valid_add_flags & valid_drop_flags);
2147}
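
/*
 * Worked example for the two counting helpers above, with hypothetical
 * flag values: add_flags = 0b1100 (endpoint contexts 1 and 2 added) and
 * drop_flags = 0b0100 (endpoint context 1 dropped, i.e. changed). After
 * the shift, valid_add_flags = 0b11 and valid_drop_flags = 0b01, so
 * new endpoints = hweight32(0b11) - hweight32(0b11 & 0b01) = 2 - 1 = 1
 * (only context 2 is truly new), and dropped endpoints = 1 - 1 = 0.
 */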
2148
2149/*
2150 * We need to reserve the new number of endpoints before the configure endpoint
2151 * command completes. We can't subtract the dropped endpoints from the number
2152 * of active endpoints until the command completes, because otherwise we could
2153 * oversubscribe the host in this case:
2154 *
2155 * - the first configure endpoint command drops more endpoints than it adds
2156 * - a second configure endpoint command that adds more endpoints is queued
2157 * - the first configure endpoint command fails, so the config is unchanged
2158 * - the second command may succeed, even though there aren't enough resources
2159 *
2160 * Must be called with xhci->lock held.
2161 */
2162static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2163 struct xhci_input_control_ctx *ctrl_ctx)
2164{
2165 u32 added_eps;
2166
2167 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2168 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2169 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2170 "Not enough ep ctxs: "
2171 "%u active, need to add %u, limit is %u.",
2172 xhci->num_active_eps, added_eps,
2173 xhci->limit_active_eps);
2174 return -ENOMEM;
2175 }
2176 xhci->num_active_eps += added_eps;
2177 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2178 "Adding %u ep ctxs, %u now active.", added_eps,
2179 xhci->num_active_eps);
2180 return 0;
2181}
2182
2183/*
2184 * The configure endpoint command was failed by the xHC for some other reason,
2185 * so we need to revert the resources that the failed configuration would have used.
2186 *
2187 * Must be called with xhci->lock held.
2188 */
2189static void xhci_free_host_resources(struct xhci_hcd *xhci,
2190 struct xhci_input_control_ctx *ctrl_ctx)
2191{
2192 u32 num_failed_eps;
2193
2194 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2195 xhci->num_active_eps -= num_failed_eps;
2196 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2197 "Removing %u failed ep ctxs, %u now active.",
2198 num_failed_eps,
2199 xhci->num_active_eps);
2200}
2201
2202/*
2203 * Now that the command has completed, clean up the active endpoint count by
2204 * subtracting out the endpoints that were dropped (but not changed).
2205 *
2206 * Must be called with xhci->lock held.
2207 */
2208static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2209 struct xhci_input_control_ctx *ctrl_ctx)
2210{
2211 u32 num_dropped_eps;
2212
2213 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2214 xhci->num_active_eps -= num_dropped_eps;
2215 if (num_dropped_eps)
2216 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2217 "Removing %u dropped ep ctxs, %u now active.",
2218 num_dropped_eps,
2219 xhci->num_active_eps);
2220}
2221
2222static unsigned int xhci_get_block_size(struct usb_device *udev)
2223{
2224 switch (udev->speed) {
2225 case USB_SPEED_LOW:
2226 case USB_SPEED_FULL:
2227 return FS_BLOCK;
2228 case USB_SPEED_HIGH:
2229 return HS_BLOCK;
2230 case USB_SPEED_SUPER:
2231 case USB_SPEED_SUPER_PLUS:
2232 return SS_BLOCK;
2233 case USB_SPEED_UNKNOWN:
2234 default:
2235 /* Should never happen */
2236 return 1;
2237 }
2238}
2239
2240static unsigned int
2241xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2242{
2243 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2244 return LS_OVERHEAD;
2245 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2246 return FS_OVERHEAD;
2247 return HS_OVERHEAD;
2248}
2249
2250/* If we are changing a LS/FS device under a HS hub,
2251 * make sure (if we are activating a new TT) that the HS bus has enough
2252 * bandwidth for this new TT.
2253 */
2254static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2255 struct xhci_virt_device *virt_dev,
2256 int old_active_eps)
2257{
2258 struct xhci_interval_bw_table *bw_table;
2259 struct xhci_tt_bw_info *tt_info;
2260
2261 /* Find the bandwidth table for the root port this TT is attached to. */
2262 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2263 tt_info = virt_dev->tt_info;
2264 /* If this TT already had active endpoints, the bandwidth for this TT
2265 * has already been added. Removing all periodic endpoints (and thus
2266	 * making the TT inactive) will only decrease the bandwidth used.
2267 */
2268 if (old_active_eps)
2269 return 0;
2270 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2271 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2272 return -ENOMEM;
2273 return 0;
2274 }
2275 /* Not sure why we would have no new active endpoints...
2276 *
2277 * Maybe because of an Evaluate Context change for a hub update or a
2278 * control endpoint 0 max packet size change?
2279 * FIXME: skip the bandwidth calculation in that case.
2280 */
2281 return 0;
2282}
2283
2284static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2285 struct xhci_virt_device *virt_dev)
2286{
2287 unsigned int bw_reserved;
2288
2289 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2290 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2291 return -ENOMEM;
2292
2293 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2294 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2295 return -ENOMEM;
2296
2297 return 0;
2298}
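
/*
 * Example of the reservation arithmetic above, using made-up numbers:
 * if SS_BW_LIMIT_IN were 5000 blocks and SS_BW_RESERVED were 10
 * (percent), then bw_reserved = DIV_ROUND_UP(10 * 5000, 100) = 500
 * blocks, and the IN direction would fail with -ENOMEM once more than
 * 4500 blocks were already in use.
 */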
2299
2300/*
2301 * This algorithm is a very conservative estimate of the worst-case scheduling
2302 * scenario for any one interval. The hardware dynamically schedules the
2303 * packets, so we can't tell which microframe could be the limiting factor in
2304 * the bandwidth scheduling. This only takes into account periodic endpoints.
2305 *
2306 * Obviously, we can't solve an NP complete problem to find the minimum worst
2307 * case scenario. Instead, we come up with an estimate that is no less than
2308 * the worst case bandwidth used for any one microframe, but may be an
2309 * over-estimate.
2310 *
2311 * We walk the requirements for each endpoint by interval, starting with the
2312 * smallest interval, and place packets in the schedule where there is only one
2313 * possible way to schedule packets for that interval. In order to simplify
2314 * this algorithm, we record the largest max packet size for each interval, and
2315 * assume all packets will be that size.
2316 *
2317 * For interval 0, we obviously must schedule all packets for each interval.
2318 * The bandwidth for interval 0 is just the amount of data to be transmitted
2319 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2320 * the number of packets).
2321 *
2322 * For interval 1, we have two possible microframes to schedule those packets
2323 * in. For this algorithm, if we can schedule the same number of packets for
2324 * each possible scheduling opportunity (each microframe), we will do so. The
2325 * remaining number of packets will be saved to be transmitted in the gaps in
2326 * the next interval's scheduling sequence.
2327 *
2328 * As we move those remaining packets to be scheduled with interval 2 packets,
2329 * we have to double the number of remaining packets to transmit. This is
2330 * because the intervals are actually powers of 2, and we would be transmitting
2331 * the previous interval's packets twice in this interval. We also have to be
2332 * sure that when we look at the largest max packet size for this interval, we
2333 * also look at the largest max packet size for the remaining packets and take
2334 * the greater of the two.
2335 *
2336 * The algorithm continues to evenly distribute packets in each scheduling
2337 * opportunity, and push the remaining packets out, until we get to the last
2338 * interval. Then those packets and their associated overhead are just added
2339 * to the bandwidth used.
2340 */
2341static int xhci_check_bw_table(struct xhci_hcd *xhci,
2342 struct xhci_virt_device *virt_dev,
2343 int old_active_eps)
2344{
2345 unsigned int bw_reserved;
2346 unsigned int max_bandwidth;
2347 unsigned int bw_used;
2348 unsigned int block_size;
2349 struct xhci_interval_bw_table *bw_table;
2350 unsigned int packet_size = 0;
2351 unsigned int overhead = 0;
2352 unsigned int packets_transmitted = 0;
2353 unsigned int packets_remaining = 0;
2354 unsigned int i;
2355
2356 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2357 return xhci_check_ss_bw(xhci, virt_dev);
2358
2359 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2360 max_bandwidth = HS_BW_LIMIT;
2361 /* Convert percent of bus BW reserved to blocks reserved */
2362 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2363 } else {
2364 max_bandwidth = FS_BW_LIMIT;
2365 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2366 }
2367
2368 bw_table = virt_dev->bw_table;
2369 /* We need to translate the max packet size and max ESIT payloads into
2370 * the units the hardware uses.
2371 */
2372 block_size = xhci_get_block_size(virt_dev->udev);
2373
2374 /* If we are manipulating a LS/FS device under a HS hub, double check
2375	 * that the HS bus has enough bandwidth if we are activating a new TT.
2376 */
2377 if (virt_dev->tt_info) {
2378 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2379 "Recalculating BW for rootport %u",
2380 virt_dev->real_port);
2381 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2382 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2383 "newly activated TT.\n");
2384 return -ENOMEM;
2385 }
2386 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2387 "Recalculating BW for TT slot %u port %u",
2388 virt_dev->tt_info->slot_id,
2389 virt_dev->tt_info->ttport);
2390 } else {
2391 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2392 "Recalculating BW for rootport %u",
2393 virt_dev->real_port);
2394 }
2395
2396 /* Add in how much bandwidth will be used for interval zero, or the
2397 * rounded max ESIT payload + number of packets * largest overhead.
2398 */
2399 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2400 bw_table->interval_bw[0].num_packets *
2401 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2402
2403 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2404 unsigned int bw_added;
2405 unsigned int largest_mps;
2406 unsigned int interval_overhead;
2407
2408 /*
2409 * How many packets could we transmit in this interval?
2410 * If packets didn't fit in the previous interval, we will need
2411 * to transmit that many packets twice within this interval.
2412 */
2413 packets_remaining = 2 * packets_remaining +
2414 bw_table->interval_bw[i].num_packets;
2415
2416 /* Find the largest max packet size of this or the previous
2417 * interval.
2418 */
2419 if (list_empty(&bw_table->interval_bw[i].endpoints))
2420 largest_mps = 0;
2421 else {
2422 struct xhci_virt_ep *virt_ep;
2423 struct list_head *ep_entry;
2424
2425 ep_entry = bw_table->interval_bw[i].endpoints.next;
2426 virt_ep = list_entry(ep_entry,
2427 struct xhci_virt_ep, bw_endpoint_list);
2428 /* Convert to blocks, rounding up */
2429 largest_mps = DIV_ROUND_UP(
2430 virt_ep->bw_info.max_packet_size,
2431 block_size);
2432 }
2433 if (largest_mps > packet_size)
2434 packet_size = largest_mps;
2435
2436 /* Use the larger overhead of this or the previous interval. */
2437 interval_overhead = xhci_get_largest_overhead(
2438 &bw_table->interval_bw[i]);
2439 if (interval_overhead > overhead)
2440 overhead = interval_overhead;
2441
2442 /* How many packets can we evenly distribute across
2443 * (1 << (i + 1)) possible scheduling opportunities?
2444 */
2445 packets_transmitted = packets_remaining >> (i + 1);
2446
2447 /* Add in the bandwidth used for those scheduled packets */
2448 bw_added = packets_transmitted * (overhead + packet_size);
2449
2450 /* How many packets do we have remaining to transmit? */
2451 packets_remaining = packets_remaining % (1 << (i + 1));
2452
2453 /* What largest max packet size should those packets have? */
2454 /* If we've transmitted all packets, don't carry over the
2455 * largest packet size.
2456 */
2457 if (packets_remaining == 0) {
2458 packet_size = 0;
2459 overhead = 0;
2460 } else if (packets_transmitted > 0) {
2461 /* Otherwise if we do have remaining packets, and we've
2462 * scheduled some packets in this interval, take the
2463 * largest max packet size from endpoints with this
2464 * interval.
2465 */
2466 packet_size = largest_mps;
2467 overhead = interval_overhead;
2468 }
2469 /* Otherwise carry over packet_size and overhead from the last
2470 * time we had a remainder.
2471 */
2472 bw_used += bw_added;
2473 if (bw_used > max_bandwidth) {
2474 xhci_warn(xhci, "Not enough bandwidth. "
2475 "Proposed: %u, Max: %u\n",
2476 bw_used, max_bandwidth);
2477 return -ENOMEM;
2478 }
2479 }
2480 /*
2481 * Ok, we know we have some packets left over after even-handedly
2482 * scheduling interval 15. We don't know which microframes they will
2483 * fit into, so we over-schedule and say they will be scheduled every
2484 * microframe.
2485 */
2486 if (packets_remaining > 0)
2487 bw_used += overhead + packet_size;
2488
2489 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2490 unsigned int port_index = virt_dev->real_port - 1;
2491
2492 /* OK, we're manipulating a HS device attached to a
2493 * root port bandwidth domain. Include the number of active TTs
2494 * in the bandwidth used.
2495 */
2496 bw_used += TT_HS_OVERHEAD *
2497 xhci->rh_bw[port_index].num_active_tts;
2498 }
2499
2500 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2501 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2502			"Available: %u percent",
2503 bw_used, max_bandwidth, bw_reserved,
2504 (max_bandwidth - bw_used - bw_reserved) * 100 /
2505 max_bandwidth);
2506
2507 bw_used += bw_reserved;
2508 if (bw_used > max_bandwidth) {
2509 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2510 bw_used, max_bandwidth);
2511 return -ENOMEM;
2512 }
2513
2514 bw_table->bw_used = bw_used;
2515 return 0;
2516}
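
/*
 * Short trace of the loop above, assuming a single high-speed endpoint
 * at interval index 1 with num_packets = 5, packet size M blocks and
 * overhead O:
 *
 *   i = 1: packets_remaining = 5, packets_transmitted = 5 >> 2 = 1,
 *          bw_added = 1 * (O + M), packets_remaining = 5 % 4 = 1
 *   i = 2: packets_remaining = 2 * 1 = 2, packets_transmitted = 2 >> 3 = 0,
 *          and the remainder keeps doubling without ever being scheduled
 *
 * The leftover packet is finally charged by the "packets_remaining > 0"
 * over-scheduling step after the loop, for a total estimate of
 * 2 * (O + M) blocks on top of the interval-0 bandwidth.
 */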
2517
2518static bool xhci_is_async_ep(unsigned int ep_type)
2519{
2520 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2521 ep_type != ISOC_IN_EP &&
2522 ep_type != INT_IN_EP);
2523}
2524
2525static bool xhci_is_sync_in_ep(unsigned int ep_type)
2526{
2527 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2528}
2529
2530static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2531{
2532 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2533
2534 if (ep_bw->ep_interval == 0)
2535 return SS_OVERHEAD_BURST +
2536 (ep_bw->mult * ep_bw->num_packets *
2537 (SS_OVERHEAD + mps));
2538 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2539 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2540 1 << ep_bw->ep_interval);
2541
2542}
2543
2544static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2545 struct xhci_bw_info *ep_bw,
2546 struct xhci_interval_bw_table *bw_table,
2547 struct usb_device *udev,
2548 struct xhci_virt_ep *virt_ep,
2549 struct xhci_tt_bw_info *tt_info)
2550{
2551 struct xhci_interval_bw *interval_bw;
2552 int normalized_interval;
2553
2554 if (xhci_is_async_ep(ep_bw->type))
2555 return;
2556
2557 if (udev->speed >= USB_SPEED_SUPER) {
2558 if (xhci_is_sync_in_ep(ep_bw->type))
2559 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2560 xhci_get_ss_bw_consumed(ep_bw);
2561 else
2562 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2563 xhci_get_ss_bw_consumed(ep_bw);
2564 return;
2565 }
2566
2567 /* SuperSpeed endpoints never get added to intervals in the table, so
2568 * this check is only valid for HS/FS/LS devices.
2569 */
2570 if (list_empty(&virt_ep->bw_endpoint_list))
2571 return;
2572 /* For LS/FS devices, we need to translate the interval expressed in
2573 * microframes to frames.
2574 */
2575 if (udev->speed == USB_SPEED_HIGH)
2576 normalized_interval = ep_bw->ep_interval;
2577 else
2578 normalized_interval = ep_bw->ep_interval - 3;
2579
2580 if (normalized_interval == 0)
2581 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2582 interval_bw = &bw_table->interval_bw[normalized_interval];
2583 interval_bw->num_packets -= ep_bw->num_packets;
2584 switch (udev->speed) {
2585 case USB_SPEED_LOW:
2586 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2587 break;
2588 case USB_SPEED_FULL:
2589 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2590 break;
2591 case USB_SPEED_HIGH:
2592 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2593 break;
2594 default:
2595 /* Should never happen because only LS/FS/HS endpoints will get
2596 * added to the endpoint list.
2597 */
2598 return;
2599 }
2600 if (tt_info)
2601 tt_info->active_eps -= 1;
2602 list_del_init(&virt_ep->bw_endpoint_list);
2603}
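
/*
 * Interval normalization example for the helper above: a full-speed
 * interrupt endpoint with ep_interval = 5 (2^5 = 32 microframes, i.e.
 * 4 frames) lands in interval_bw[5 - 3] = interval_bw[2], because the
 * LS/FS bandwidth tables are indexed in frames rather than microframes.
 */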
2604
2605static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2606 struct xhci_bw_info *ep_bw,
2607 struct xhci_interval_bw_table *bw_table,
2608 struct usb_device *udev,
2609 struct xhci_virt_ep *virt_ep,
2610 struct xhci_tt_bw_info *tt_info)
2611{
2612 struct xhci_interval_bw *interval_bw;
2613 struct xhci_virt_ep *smaller_ep;
2614 int normalized_interval;
2615
2616 if (xhci_is_async_ep(ep_bw->type))
2617 return;
2618
2619	if (udev->speed >= USB_SPEED_SUPER) {
2620 if (xhci_is_sync_in_ep(ep_bw->type))
2621 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2622 xhci_get_ss_bw_consumed(ep_bw);
2623 else
2624 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2625 xhci_get_ss_bw_consumed(ep_bw);
2626 return;
2627 }
2628
2629 /* For LS/FS devices, we need to translate the interval expressed in
2630 * microframes to frames.
2631 */
2632 if (udev->speed == USB_SPEED_HIGH)
2633 normalized_interval = ep_bw->ep_interval;
2634 else
2635 normalized_interval = ep_bw->ep_interval - 3;
2636
2637 if (normalized_interval == 0)
2638 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2639 interval_bw = &bw_table->interval_bw[normalized_interval];
2640 interval_bw->num_packets += ep_bw->num_packets;
2641 switch (udev->speed) {
2642 case USB_SPEED_LOW:
2643 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2644 break;
2645 case USB_SPEED_FULL:
2646 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2647 break;
2648 case USB_SPEED_HIGH:
2649 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2650 break;
2651 default:
2652 /* Should never happen because only LS/FS/HS endpoints will get
2653 * added to the endpoint list.
2654 */
2655 return;
2656 }
2657
2658 if (tt_info)
2659 tt_info->active_eps += 1;
2660 /* Insert the endpoint into the list, largest max packet size first. */
2661 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2662 bw_endpoint_list) {
2663 if (ep_bw->max_packet_size >=
2664 smaller_ep->bw_info.max_packet_size) {
2665 /* Add the new ep before the smaller endpoint */
2666 list_add_tail(&virt_ep->bw_endpoint_list,
2667 &smaller_ep->bw_endpoint_list);
2668 return;
2669 }
2670 }
2671 /* Add the new endpoint at the end of the list. */
2672 list_add_tail(&virt_ep->bw_endpoint_list,
2673 &interval_bw->endpoints);
2674}
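
/*
 * Note on the sorted insert above: list_add_tail(new, pos) links "new"
 * immediately before "pos", so stopping at the first endpoint whose max
 * packet size is not larger keeps the list ordered from largest to
 * smallest max packet size.
 */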
2675
2676void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2677 struct xhci_virt_device *virt_dev,
2678 int old_active_eps)
2679{
2680 struct xhci_root_port_bw_info *rh_bw_info;
2681 if (!virt_dev->tt_info)
2682 return;
2683
2684 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2685 if (old_active_eps == 0 &&
2686 virt_dev->tt_info->active_eps != 0) {
2687 rh_bw_info->num_active_tts += 1;
2688 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2689 } else if (old_active_eps != 0 &&
2690 virt_dev->tt_info->active_eps == 0) {
2691 rh_bw_info->num_active_tts -= 1;
2692 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2693 }
2694}
2695
2696static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2697 struct xhci_virt_device *virt_dev,
2698 struct xhci_container_ctx *in_ctx)
2699{
2700 struct xhci_bw_info ep_bw_info[31];
2701 int i;
2702 struct xhci_input_control_ctx *ctrl_ctx;
2703 int old_active_eps = 0;
2704
2705 if (virt_dev->tt_info)
2706 old_active_eps = virt_dev->tt_info->active_eps;
2707
2708 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2709 if (!ctrl_ctx) {
2710 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2711 __func__);
2712 return -ENOMEM;
2713 }
2714
2715 for (i = 0; i < 31; i++) {
2716 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2717 continue;
2718
2719 /* Make a copy of the BW info in case we need to revert this */
2720 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2721 sizeof(ep_bw_info[i]));
2722 /* Drop the endpoint from the interval table if the endpoint is
2723 * being dropped or changed.
2724 */
2725 if (EP_IS_DROPPED(ctrl_ctx, i))
2726 xhci_drop_ep_from_interval_table(xhci,
2727 &virt_dev->eps[i].bw_info,
2728 virt_dev->bw_table,
2729 virt_dev->udev,
2730 &virt_dev->eps[i],
2731 virt_dev->tt_info);
2732 }
2733 /* Overwrite the information stored in the endpoints' bw_info */
2734 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2735 for (i = 0; i < 31; i++) {
2736 /* Add any changed or added endpoints to the interval table */
2737 if (EP_IS_ADDED(ctrl_ctx, i))
2738 xhci_add_ep_to_interval_table(xhci,
2739 &virt_dev->eps[i].bw_info,
2740 virt_dev->bw_table,
2741 virt_dev->udev,
2742 &virt_dev->eps[i],
2743 virt_dev->tt_info);
2744 }
2745
2746 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2747 /* Ok, this fits in the bandwidth we have.
2748 * Update the number of active TTs.
2749 */
2750 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2751 return 0;
2752 }
2753
2754 /* We don't have enough bandwidth for this, revert the stored info. */
2755 for (i = 0; i < 31; i++) {
2756 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2757 continue;
2758
2759 /* Drop the new copies of any added or changed endpoints from
2760 * the interval table.
2761 */
2762 if (EP_IS_ADDED(ctrl_ctx, i)) {
2763 xhci_drop_ep_from_interval_table(xhci,
2764 &virt_dev->eps[i].bw_info,
2765 virt_dev->bw_table,
2766 virt_dev->udev,
2767 &virt_dev->eps[i],
2768 virt_dev->tt_info);
2769 }
2770 /* Revert the endpoint back to its old information */
2771 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2772 sizeof(ep_bw_info[i]));
2773 /* Add any changed or dropped endpoints back into the table */
2774 if (EP_IS_DROPPED(ctrl_ctx, i))
2775 xhci_add_ep_to_interval_table(xhci,
2776 &virt_dev->eps[i].bw_info,
2777 virt_dev->bw_table,
2778 virt_dev->udev,
2779 &virt_dev->eps[i],
2780 virt_dev->tt_info);
2781 }
2782 return -ENOMEM;
2783}
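
/*
 * The function above is a snapshot/apply/verify/rollback transaction.
 * A minimal sketch of the same pattern, with hypothetical names:
 */
#if 0
static int example_transactional_update(struct state *s)
{
	struct state saved = *s;	/* snapshot the old bandwidth info */

	apply_changes(s);		/* tentatively apply add/drop */
	if (check_limits(s) == 0)
		return 0;		/* fits: commit */

	*s = saved;			/* doesn't fit: roll back */
	return -ENOMEM;
}
#endif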
2784
2785
2786/* Issue a configure endpoint command or evaluate context command
2787 * and wait for it to finish.
2788 */
2789static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2790 struct usb_device *udev,
2791 struct xhci_command *command,
2792 bool ctx_change, bool must_succeed)
2793{
2794 int ret;
2795 unsigned long flags;
2796 struct xhci_input_control_ctx *ctrl_ctx;
2797 struct xhci_virt_device *virt_dev;
2798 struct xhci_slot_ctx *slot_ctx;
2799
2800 if (!command)
2801 return -EINVAL;
2802
2803 spin_lock_irqsave(&xhci->lock, flags);
2804
2805 if (xhci->xhc_state & XHCI_STATE_DYING) {
2806 spin_unlock_irqrestore(&xhci->lock, flags);
2807 return -ESHUTDOWN;
2808 }
2809
2810 virt_dev = xhci->devs[udev->slot_id];
2811
2812 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2813 if (!ctrl_ctx) {
2814 spin_unlock_irqrestore(&xhci->lock, flags);
2815 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2816 __func__);
2817 return -ENOMEM;
2818 }
2819
2820 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2821 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2822 spin_unlock_irqrestore(&xhci->lock, flags);
2823 xhci_warn(xhci, "Not enough host resources, "
2824 "active endpoint contexts = %u\n",
2825 xhci->num_active_eps);
2826 return -ENOMEM;
2827 }
2828 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2829 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2830 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2831 xhci_free_host_resources(xhci, ctrl_ctx);
2832 spin_unlock_irqrestore(&xhci->lock, flags);
2833 xhci_warn(xhci, "Not enough bandwidth\n");
2834 return -ENOMEM;
2835 }
2836
2837 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2838
2839 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2840 trace_xhci_configure_endpoint(slot_ctx);
2841
2842 if (!ctx_change)
2843 ret = xhci_queue_configure_endpoint(xhci, command,
2844 command->in_ctx->dma,
2845 udev->slot_id, must_succeed);
2846 else
2847 ret = xhci_queue_evaluate_context(xhci, command,
2848 command->in_ctx->dma,
2849 udev->slot_id, must_succeed);
2850 if (ret < 0) {
2851 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2852 xhci_free_host_resources(xhci, ctrl_ctx);
2853 spin_unlock_irqrestore(&xhci->lock, flags);
2854 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2855 "FIXME allocate a new ring segment");
2856 return -ENOMEM;
2857 }
2858 xhci_ring_cmd_db(xhci);
2859 spin_unlock_irqrestore(&xhci->lock, flags);
2860
2861 /* Wait for the configure endpoint command to complete */
2862 wait_for_completion(command->completion);
2863
2864 if (!ctx_change)
2865 ret = xhci_configure_endpoint_result(xhci, udev,
2866 &command->status);
2867 else
2868 ret = xhci_evaluate_context_result(xhci, udev,
2869 &command->status);
2870
2871 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2872 spin_lock_irqsave(&xhci->lock, flags);
2873 /* If the command failed, remove the reserved resources.
2874 * Otherwise, clean up the estimate to include dropped eps.
2875 */
2876 if (ret)
2877 xhci_free_host_resources(xhci, ctrl_ctx);
2878 else
2879 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2880 spin_unlock_irqrestore(&xhci->lock, flags);
2881 }
2882 return ret;
2883}
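
/*
 * The issue-and-wait sequence above is the general pattern for all xHCI
 * commands. A condensed sketch, with error handling omitted (the helpers
 * named here all exist in this driver):
 */
#if 0
	struct xhci_command *cmd;

	cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_queue_configure_endpoint(xhci, cmd, cmd->in_ctx->dma,
				      slot_id, false);	/* place command TRB */
	xhci_ring_cmd_db(xhci);				/* ring host doorbell */
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(cmd->completion);	/* IRQ handler completes it */
	/* cmd->status now holds the completion code from the event TRB */
#endif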
2884
2885static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2886 struct xhci_virt_device *vdev, int i)
2887{
2888 struct xhci_virt_ep *ep = &vdev->eps[i];
2889
2890 if (ep->ep_state & EP_HAS_STREAMS) {
2891 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2892 xhci_get_endpoint_address(i));
2893 xhci_free_stream_info(xhci, ep->stream_info);
2894 ep->stream_info = NULL;
2895 ep->ep_state &= ~EP_HAS_STREAMS;
2896 }
2897}
2898
2899/* Called after one or more calls to xhci_add_endpoint() or
2900 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2901 * to call xhci_reset_bandwidth().
2902 *
2903 * Since we are in the middle of changing either configuration or
2904 * installing a new alt setting, the USB core won't allow URBs to be
2905 * enqueued for any endpoint on the old config or interface. Nothing
2906 * else should be touching the xhci->devs[slot_id] structure, so we
2907 * don't need to take the xhci->lock for manipulating that.
2908 */
2909int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2910{
2911 int i;
2912 int ret = 0;
2913 struct xhci_hcd *xhci;
2914 struct xhci_virt_device *virt_dev;
2915 struct xhci_input_control_ctx *ctrl_ctx;
2916 struct xhci_slot_ctx *slot_ctx;
2917 struct xhci_command *command;
2918
2919 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2920 if (ret <= 0)
2921 return ret;
2922 xhci = hcd_to_xhci(hcd);
2923 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2924 (xhci->xhc_state & XHCI_STATE_REMOVING))
2925 return -ENODEV;
2926
2927 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2928 virt_dev = xhci->devs[udev->slot_id];
2929
2930 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2931 if (!command)
2932 return -ENOMEM;
2933
2934 command->in_ctx = virt_dev->in_ctx;
2935
2936 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2937 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2938 if (!ctrl_ctx) {
2939 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2940 __func__);
2941 ret = -ENOMEM;
2942 goto command_cleanup;
2943 }
2944 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2945 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2946 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2947
2948	/* Don't issue the command if there are no endpoints to update. */
2949 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2950 ctrl_ctx->drop_flags == 0) {
2951 ret = 0;
2952 goto command_cleanup;
2953 }
2954 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
2955 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2956 for (i = 31; i >= 1; i--) {
2957 __le32 le32 = cpu_to_le32(BIT(i));
2958
2959 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2960 || (ctrl_ctx->add_flags & le32) || i == 1) {
2961 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2962 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2963 break;
2964 }
2965 }
2966
2967 ret = xhci_configure_endpoint(xhci, udev, command,
2968 false, false);
2969 if (ret)
2970		/* Caller should call reset_bandwidth() */
2971 goto command_cleanup;
2972
2973 /* Free any rings that were dropped, but not changed. */
2974 for (i = 1; i < 31; i++) {
2975 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2976 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2977 xhci_free_endpoint_ring(xhci, virt_dev, i);
2978 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2979 }
2980 }
2981 xhci_zero_in_ctx(xhci, virt_dev);
2982 /*
2983 * Install any rings for completely new endpoints or changed endpoints,
2984 * and free any old rings from changed endpoints.
2985 */
2986 for (i = 1; i < 31; i++) {
2987 if (!virt_dev->eps[i].new_ring)
2988 continue;
2989 /* Only free the old ring if it exists.
2990 * It may not if this is the first add of an endpoint.
2991 */
2992 if (virt_dev->eps[i].ring) {
2993 xhci_free_endpoint_ring(xhci, virt_dev, i);
2994 }
2995 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2996 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2997 virt_dev->eps[i].new_ring = NULL;
2998 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
2999 }
3000command_cleanup:
3001 kfree(command->completion);
3002 kfree(command);
3003
3004 return ret;
3005}
3006EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
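
/*
 * Sketch of how the USB core drives the hooks around a configuration or
 * alt setting change (condensed from usb_hcd_alloc_bandwidth(); the
 * for_each_*() iterators are hypothetical shorthand):
 */
#if 0
	for_each_dropped_endpoint(udev, ep)
		hcd->driver->drop_endpoint(hcd, udev, ep);
	for_each_added_endpoint(udev, ep)
		hcd->driver->add_endpoint(hcd, udev, ep);

	if (hcd->driver->check_bandwidth(hcd, udev))
		hcd->driver->reset_bandwidth(hcd, udev);	/* roll back */
#endif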
3007
3008void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3009{
3010 struct xhci_hcd *xhci;
3011 struct xhci_virt_device *virt_dev;
3012 int i, ret;
3013
3014 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3015 if (ret <= 0)
3016 return;
3017 xhci = hcd_to_xhci(hcd);
3018
3019 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3020 virt_dev = xhci->devs[udev->slot_id];
3021 /* Free any rings allocated for added endpoints */
3022 for (i = 0; i < 31; i++) {
3023 if (virt_dev->eps[i].new_ring) {
3024 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3025 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3026 virt_dev->eps[i].new_ring = NULL;
3027 }
3028 }
3029 xhci_zero_in_ctx(xhci, virt_dev);
3030}
3031EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3032
3033static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3034 struct xhci_container_ctx *in_ctx,
3035 struct xhci_container_ctx *out_ctx,
3036 struct xhci_input_control_ctx *ctrl_ctx,
3037 u32 add_flags, u32 drop_flags)
3038{
3039 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3040 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3041 xhci_slot_copy(xhci, in_ctx, out_ctx);
3042 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3043}
3044
3045static void xhci_endpoint_disable(struct usb_hcd *hcd,
3046 struct usb_host_endpoint *host_ep)
3047{
3048 struct xhci_hcd *xhci;
3049 struct xhci_virt_device *vdev;
3050 struct xhci_virt_ep *ep;
3051 struct usb_device *udev;
3052 unsigned long flags;
3053 unsigned int ep_index;
3054
3055 xhci = hcd_to_xhci(hcd);
3056rescan:
3057 spin_lock_irqsave(&xhci->lock, flags);
3058
3059 udev = (struct usb_device *)host_ep->hcpriv;
3060 if (!udev || !udev->slot_id)
3061 goto done;
3062
3063 vdev = xhci->devs[udev->slot_id];
3064 if (!vdev)
3065 goto done;
3066
3067 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3068 ep = &vdev->eps[ep_index];
3069
3070 /* wait for hub_tt_work to finish clearing hub TT */
3071 if (ep->ep_state & EP_CLEARING_TT) {
3072 spin_unlock_irqrestore(&xhci->lock, flags);
3073 schedule_timeout_uninterruptible(1);
3074 goto rescan;
3075 }
3076
3077 if (ep->ep_state)
3078 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3079 ep->ep_state);
3080done:
3081 host_ep->hcpriv = NULL;
3082 spin_unlock_irqrestore(&xhci->lock, flags);
3083}
3084
3085/*
3086 * Called after usb core issues a clear halt control message.
3087 * The host side of the halt should already be cleared by a reset endpoint
3088 * command issued when the STALL event was received.
3089 *
3090 * The reset endpoint command may only be issued to endpoints in the halted
3091 * state. For software that wishes to reset the data toggle or sequence number
3092 * of an endpoint that isn't in the halted state this function will issue a
3093 * configure endpoint command with the Drop and Add bits set for the target
3094 * endpoint. Refer to the additional note in xHCI specification section 4.6.8.
3095 *
3096 * vdev may be lost due to xHC restore error and re-initialization during S3/S4
3097 * resume. A new vdev will be allocated later by xhci_discover_or_reset_device()
3098 */
3099
3100static void xhci_endpoint_reset(struct usb_hcd *hcd,
3101 struct usb_host_endpoint *host_ep)
3102{
3103 struct xhci_hcd *xhci;
3104 struct usb_device *udev;
3105 struct xhci_virt_device *vdev;
3106 struct xhci_virt_ep *ep;
3107 struct xhci_input_control_ctx *ctrl_ctx;
3108 struct xhci_command *stop_cmd, *cfg_cmd;
3109 unsigned int ep_index;
3110 unsigned long flags;
3111 u32 ep_flag;
3112 int err;
3113
3114 xhci = hcd_to_xhci(hcd);
3115 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3116
3117 /*
3118	 * USB core assumes a max packet value for ep0 on FS devices until the
3119	 * real value is read from the descriptor. The core resets ep0 if the
3120	 * values mismatch; in that case, reconfigure the xHCI ep0 context here.
3121 */
3122 if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
3123
3124 udev = container_of(host_ep, struct usb_device, ep0);
3125 if (udev->speed != USB_SPEED_FULL || !udev->slot_id)
3126 return;
3127
3128 vdev = xhci->devs[udev->slot_id];
3129 if (!vdev || vdev->udev != udev)
3130 return;
3131
3132 xhci_check_ep0_maxpacket(xhci, vdev);
3133
3134 /* Nothing else should be done here for ep0 during ep reset */
3135 return;
3136 }
3137
3138 if (!host_ep->hcpriv)
3139 return;
3140 udev = (struct usb_device *) host_ep->hcpriv;
3141 vdev = xhci->devs[udev->slot_id];
3142
3143 if (!udev->slot_id || !vdev)
3144 return;
3145
3146 ep = &vdev->eps[ep_index];
3147
3148	/* Bail out if toggle is already being cleared by an endpoint reset */
3149 spin_lock_irqsave(&xhci->lock, flags);
3150 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3151 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3152 spin_unlock_irqrestore(&xhci->lock, flags);
3153 return;
3154 }
3155 spin_unlock_irqrestore(&xhci->lock, flags);
3156	/* Only interrupt and bulk endpoints use data toggle; see USB 2.0 spec 5.5.4 */
3157 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3158 usb_endpoint_xfer_isoc(&host_ep->desc))
3159 return;
3160
3161 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3162
3163 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3164 return;
3165
3166 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3167 if (!stop_cmd)
3168 return;
3169
3170 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3171 if (!cfg_cmd)
3172 goto cleanup;
3173
3174 spin_lock_irqsave(&xhci->lock, flags);
3175
3176 /* block queuing new trbs and ringing ep doorbell */
3177 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3178
3179 /*
3180 * Make sure endpoint ring is empty before resetting the toggle/seq.
3181	 * The driver is required to synchronously cancel all transfer requests.
3182 * Stop the endpoint to force xHC to update the output context
3183 */
3184
3185 if (!list_empty(&ep->ring->td_list)) {
3186 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3187 spin_unlock_irqrestore(&xhci->lock, flags);
3188 xhci_free_command(xhci, cfg_cmd);
3189 goto cleanup;
3190 }
3191
3192 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3193 ep_index, 0);
3194 if (err < 0) {
3195 spin_unlock_irqrestore(&xhci->lock, flags);
3196 xhci_free_command(xhci, cfg_cmd);
3197		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
3198 __func__, err);
3199 goto cleanup;
3200 }
3201
3202 xhci_ring_cmd_db(xhci);
3203 spin_unlock_irqrestore(&xhci->lock, flags);
3204
3205 wait_for_completion(stop_cmd->completion);
3206
3207 spin_lock_irqsave(&xhci->lock, flags);
3208
3209 /* config ep command clears toggle if add and drop ep flags are set */
3210 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3211 if (!ctrl_ctx) {
3212 spin_unlock_irqrestore(&xhci->lock, flags);
3213 xhci_free_command(xhci, cfg_cmd);
3214 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3215 __func__);
3216 goto cleanup;
3217 }
3218
3219 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3220 ctrl_ctx, ep_flag, ep_flag);
3221 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3222
3223 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3224 udev->slot_id, false);
3225 if (err < 0) {
3226 spin_unlock_irqrestore(&xhci->lock, flags);
3227 xhci_free_command(xhci, cfg_cmd);
3228		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
3229 __func__, err);
3230 goto cleanup;
3231 }
3232
3233 xhci_ring_cmd_db(xhci);
3234 spin_unlock_irqrestore(&xhci->lock, flags);
3235
3236 wait_for_completion(cfg_cmd->completion);
3237
3238 xhci_free_command(xhci, cfg_cmd);
3239cleanup:
3240 xhci_free_command(xhci, stop_cmd);
3241 spin_lock_irqsave(&xhci->lock, flags);
3242 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3243 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3244 spin_unlock_irqrestore(&xhci->lock, flags);
3245}
3246
3247static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3248 struct usb_device *udev, struct usb_host_endpoint *ep,
3249 unsigned int slot_id)
3250{
3251 int ret;
3252 unsigned int ep_index;
3253 unsigned int ep_state;
3254
3255 if (!ep)
3256 return -EINVAL;
3257 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3258 if (ret <= 0)
3259 return ret ? ret : -EINVAL;
3260 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3261 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3262 " descriptor for ep 0x%x does not support streams\n",
3263 ep->desc.bEndpointAddress);
3264 return -EINVAL;
3265 }
3266
3267 ep_index = xhci_get_endpoint_index(&ep->desc);
3268 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3269 if (ep_state & EP_HAS_STREAMS ||
3270 ep_state & EP_GETTING_STREAMS) {
3271 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3272 "already has streams set up.\n",
3273 ep->desc.bEndpointAddress);
3274 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3275 "dynamic stream context array reallocation.\n");
3276 return -EINVAL;
3277 }
3278 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3279 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3280 "endpoint 0x%x; URBs are pending.\n",
3281 ep->desc.bEndpointAddress);
3282 return -EINVAL;
3283 }
3284 return 0;
3285}
3286
3287static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3288 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3289{
3290 unsigned int max_streams;
3291
3292 /* The stream context array size must be a power of two */
3293 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3294 /*
3295 * Find out how many primary stream array entries the host controller
3296 * supports. Later we may use secondary stream arrays (similar to 2nd
3297 * level page entries), but that's an optional feature for xHCI host
3298 * controllers. xHCs must support at least 4 stream IDs.
3299 */
3300 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3301 if (*num_stream_ctxs > max_streams) {
3302 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3303 max_streams);
3304 *num_stream_ctxs = max_streams;
3305 *num_streams = max_streams;
3306 }
3307}
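
/*
 * Example for the helper above: a request for 6 stream IDs needs a
 * stream context array of roundup_pow_of_two(6) = 8 entries. If
 * HCC_MAX_PSA(xhci->hcc_params) reports that the host only supports 4
 * primary stream array entries, both the array size and the usable
 * stream count are clamped to 4.
 */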
3308
3309 /* Returns an error code if one of the endpoints already has streams.
3310 * This does not change any data structures; it only checks and gathers
3311 * information.
3312 */
3313static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3314 struct usb_device *udev,
3315 struct usb_host_endpoint **eps, unsigned int num_eps,
3316 unsigned int *num_streams, u32 *changed_ep_bitmask)
3317{
3318 unsigned int max_streams;
3319 unsigned int endpoint_flag;
3320 int i;
3321 int ret;
3322
3323 for (i = 0; i < num_eps; i++) {
3324 ret = xhci_check_streams_endpoint(xhci, udev,
3325 eps[i], udev->slot_id);
3326 if (ret < 0)
3327 return ret;
3328
3329 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3330 if (max_streams < (*num_streams - 1)) {
3331 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3332 eps[i]->desc.bEndpointAddress,
3333 max_streams);
3334 *num_streams = max_streams+1;
3335 }
3336
3337 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3338 if (*changed_ep_bitmask & endpoint_flag)
3339 return -EINVAL;
3340 *changed_ep_bitmask |= endpoint_flag;
3341 }
3342 return 0;
3343}
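
/*
 * Clamping example for the helper above: *num_streams arrives with
 * stream 0 already counted, so with *num_streams = 17 (16 usable IDs)
 * and an endpoint companion descriptor advertising only 8 streams,
 * max_streams = 8 < 16 and *num_streams is reduced to 8 + 1 = 9.
 */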
3344
3345static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3346 struct usb_device *udev,
3347 struct usb_host_endpoint **eps, unsigned int num_eps)
3348{
3349 u32 changed_ep_bitmask = 0;
3350 unsigned int slot_id;
3351 unsigned int ep_index;
3352 unsigned int ep_state;
3353 int i;
3354
3355 slot_id = udev->slot_id;
3356 if (!xhci->devs[slot_id])
3357 return 0;
3358
3359 for (i = 0; i < num_eps; i++) {
3360 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3361 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3362 /* Are streams already being freed for the endpoint? */
3363 if (ep_state & EP_GETTING_NO_STREAMS) {
3364 xhci_warn(xhci, "WARN Can't disable streams for "
3365 "endpoint 0x%x, "
3366 "streams are being disabled already\n",
3367 eps[i]->desc.bEndpointAddress);
3368 return 0;
3369 }
3370 /* Are there actually any streams to free? */
3371 if (!(ep_state & EP_HAS_STREAMS) &&
3372 !(ep_state & EP_GETTING_STREAMS)) {
3373 xhci_warn(xhci, "WARN Can't disable streams for "
3374 "endpoint 0x%x, "
3375 "streams are already disabled!\n",
3376 eps[i]->desc.bEndpointAddress);
3377 xhci_warn(xhci, "WARN xhci_free_streams() called "
3378 "with non-streams endpoint\n");
3379 return 0;
3380 }
3381 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3382 }
3383 return changed_ep_bitmask;
3384}
3385
3386/*
3387 * The USB device drivers use this function (through the HCD interface in USB
3388 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3389 * coordinate mass storage command queueing across multiple endpoints (basically
3390 * a stream ID == a task ID).
3391 *
3392 * Setting up streams involves allocating the same size stream context array
3393 * for each endpoint and issuing a configure endpoint command for all endpoints.
3394 *
3395 * Don't allow the call to succeed if one endpoint only supports one stream
3396 * (which means it doesn't support streams at all).
3397 *
3398 * Drivers may get less stream IDs than they asked for, if the host controller
3399 * hardware or endpoints claim they can't support the number of requested
3400 * stream IDs.
3401 */
3402static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3403 struct usb_host_endpoint **eps, unsigned int num_eps,
3404 unsigned int num_streams, gfp_t mem_flags)
3405{
3406 int i, ret;
3407 struct xhci_hcd *xhci;
3408 struct xhci_virt_device *vdev;
3409 struct xhci_command *config_cmd;
3410 struct xhci_input_control_ctx *ctrl_ctx;
3411 unsigned int ep_index;
3412 unsigned int num_stream_ctxs;
3413 unsigned int max_packet;
3414 unsigned long flags;
3415 u32 changed_ep_bitmask = 0;
3416
3417 if (!eps)
3418 return -EINVAL;
3419
3420 /* Add one to the number of streams requested to account for
3421 * stream 0 that is reserved for xHCI usage.
3422 */
3423 num_streams += 1;
3424 xhci = hcd_to_xhci(hcd);
3425 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3426 num_streams);
3427
3428 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3429 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3430 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3431 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3432 return -ENOSYS;
3433 }
3434
3435 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3436 if (!config_cmd)
3437 return -ENOMEM;
3438
3439 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3440 if (!ctrl_ctx) {
3441 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3442 __func__);
3443 xhci_free_command(xhci, config_cmd);
3444 return -ENOMEM;
3445 }
3446
3447 /* Check to make sure none of the endpoints are already configured for
3448 * streams. While we're at it, find the maximum number of streams that
3449 * all the endpoints will support and check for duplicate endpoints.
3450 */
3451 spin_lock_irqsave(&xhci->lock, flags);
3452 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3453 num_eps, &num_streams, &changed_ep_bitmask);
3454 if (ret < 0) {
3455 xhci_free_command(xhci, config_cmd);
3456 spin_unlock_irqrestore(&xhci->lock, flags);
3457 return ret;
3458 }
3459 if (num_streams <= 1) {
3460 xhci_warn(xhci, "WARN: endpoints can't handle "
3461 "more than one stream.\n");
3462 xhci_free_command(xhci, config_cmd);
3463 spin_unlock_irqrestore(&xhci->lock, flags);
3464 return -EINVAL;
3465 }
3466 vdev = xhci->devs[udev->slot_id];
3467 /* Mark each endpoint as being in transition, so
3468 * xhci_urb_enqueue() will reject all URBs.
3469 */
3470 for (i = 0; i < num_eps; i++) {
3471 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3472 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3473 }
3474 spin_unlock_irqrestore(&xhci->lock, flags);
3475
3476 /* Setup internal data structures and allocate HW data structures for
3477 * streams (but don't install the HW structures in the input context
3478 * until we're sure all memory allocation succeeded).
3479 */
3480 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3481 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3482 num_stream_ctxs, num_streams);
3483
3484 for (i = 0; i < num_eps; i++) {
3485 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3486 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3487 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3488 num_stream_ctxs,
3489 num_streams,
3490 max_packet, mem_flags);
3491 if (!vdev->eps[ep_index].stream_info)
3492 goto cleanup;
3493 /* Set maxPstreams in endpoint context and update deq ptr to
3494 * point to stream context array. FIXME
3495 */
3496 }
3497
3498 /* Set up the input context for a configure endpoint command. */
3499 for (i = 0; i < num_eps; i++) {
3500 struct xhci_ep_ctx *ep_ctx;
3501
3502 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3503 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3504
3505 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3506 vdev->out_ctx, ep_index);
3507 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3508 vdev->eps[ep_index].stream_info);
3509 }
3510 /* Tell the HW to drop its old copy of the endpoint context info
3511 * and add the updated copy from the input context.
3512 */
3513 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3514 vdev->out_ctx, ctrl_ctx,
3515 changed_ep_bitmask, changed_ep_bitmask);
3516
3517 /* Issue and wait for the configure endpoint command */
3518 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3519 false, false);
3520
3521 /* xHC rejected the configure endpoint command for some reason, so we
3522 * leave the old ring intact and free our internal streams data
3523 * structure.
3524 */
3525 if (ret < 0)
3526 goto cleanup;
3527
3528 spin_lock_irqsave(&xhci->lock, flags);
3529 for (i = 0; i < num_eps; i++) {
3530 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3531 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3532 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3533 udev->slot_id, ep_index);
3534 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3535 }
3536 xhci_free_command(xhci, config_cmd);
3537 spin_unlock_irqrestore(&xhci->lock, flags);
3538
3539 for (i = 0; i < num_eps; i++) {
3540 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3541 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3542 }
3543 /* Subtract 1 for stream 0, which drivers can't use */
3544 return num_streams - 1;
3545
3546cleanup:
3547 /* If it didn't work, free the streams! */
3548 for (i = 0; i < num_eps; i++) {
3549 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3550 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3551 vdev->eps[ep_index].stream_info = NULL;
3552 /* FIXME Unset maxPstreams in endpoint context and
3553 * update deq ptr to point to normal stream ring.
3554 */
3555 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3556 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3557 xhci_endpoint_zero(xhci, vdev, eps[i]);
3558 }
3559 xhci_free_command(xhci, config_cmd);
3560 return -ENOMEM;
3561}
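
/*
 * Illustrative sketch (hypothetical caller, not part of this file): class
 * drivers reach xhci_alloc_streams() through usb_alloc_streams(), and must
 * cope with being granted fewer stream IDs than they requested:
 */
static int __maybe_unused example_request_streams(struct usb_interface *intf,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	/* Ask for 256 stream IDs; the host or device may support fewer. */
	int ret = usb_alloc_streams(intf, eps, num_eps, 256, GFP_NOIO);

	if (ret < 0)
		return ret;	/* e.g. -ENOSYS when streams are unsupported */
	return ret;		/* number of stream IDs actually usable */
}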
3562
3563/* Transition the endpoint from using streams to being a "normal" endpoint
3564 * without streams.
3565 *
3566 * Modify the endpoint context state, submit a configure endpoint command,
3567 * and free all endpoint rings for streams if that completes successfully.
3568 */
3569static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3570 struct usb_host_endpoint **eps, unsigned int num_eps,
3571 gfp_t mem_flags)
3572{
3573 int i, ret;
3574 struct xhci_hcd *xhci;
3575 struct xhci_virt_device *vdev;
3576 struct xhci_command *command;
3577 struct xhci_input_control_ctx *ctrl_ctx;
3578 unsigned int ep_index;
3579 unsigned long flags;
3580 u32 changed_ep_bitmask;
3581
3582 xhci = hcd_to_xhci(hcd);
3583 vdev = xhci->devs[udev->slot_id];
3584
3585 /* Set up a configure endpoint command to remove the streams rings */
3586 spin_lock_irqsave(&xhci->lock, flags);
3587 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3588 udev, eps, num_eps);
3589 if (changed_ep_bitmask == 0) {
3590 spin_unlock_irqrestore(&xhci->lock, flags);
3591 return -EINVAL;
3592 }
3593
3594 /* Use the xhci_command structure from the first endpoint. We may have
3595 * allocated too many, but the driver may call xhci_free_streams() for
3596 * each endpoint it grouped into one call to xhci_alloc_streams().
3597 */
3598 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3599 command = vdev->eps[ep_index].stream_info->free_streams_command;
3600 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3601 if (!ctrl_ctx) {
3602 spin_unlock_irqrestore(&xhci->lock, flags);
3603 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3604 __func__);
3605 return -EINVAL;
3606 }
3607
3608 for (i = 0; i < num_eps; i++) {
3609 struct xhci_ep_ctx *ep_ctx;
3610
3611 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3612 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3613 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3614 EP_GETTING_NO_STREAMS;
3615
3616 xhci_endpoint_copy(xhci, command->in_ctx,
3617 vdev->out_ctx, ep_index);
3618 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3619 &vdev->eps[ep_index]);
3620 }
3621 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3622 vdev->out_ctx, ctrl_ctx,
3623 changed_ep_bitmask, changed_ep_bitmask);
3624 spin_unlock_irqrestore(&xhci->lock, flags);
3625
3626 /* Issue and wait for the configure endpoint command,
3627 * which must succeed.
3628 */
3629 ret = xhci_configure_endpoint(xhci, udev, command,
3630 false, true);
3631
3632 /* xHC rejected the configure endpoint command for some reason, so we
3633 * leave the streams rings intact.
3634 */
3635 if (ret < 0)
3636 return ret;
3637
3638 spin_lock_irqsave(&xhci->lock, flags);
3639 for (i = 0; i < num_eps; i++) {
3640 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3641 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3642 vdev->eps[ep_index].stream_info = NULL;
3643 /* FIXME Unset maxPstreams in endpoint context and
3644 * update deq ptr to point to normal stream ring.
3645 */
3646 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3647 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3648 }
3649 spin_unlock_irqrestore(&xhci->lock, flags);
3650
3651 return 0;
3652}
3653
3654/*
3655 * Deletes endpoint resources for endpoints that were active before a Reset
3656 * Device command, or a Disable Slot command. The Reset Device command leaves
3657 * the control endpoint intact, whereas the Disable Slot command deletes it.
3658 *
3659 * Must be called with xhci->lock held.
3660 */
3661void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3662 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3663{
3664 int i;
3665 unsigned int num_dropped_eps = 0;
3666 unsigned int drop_flags = 0;
3667
3668 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3669 if (virt_dev->eps[i].ring) {
3670 drop_flags |= 1 << i;
3671 num_dropped_eps++;
3672 }
3673 }
3674 xhci->num_active_eps -= num_dropped_eps;
3675 if (num_dropped_eps)
3676 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3677 "Dropped %u ep ctxs, flags = 0x%x, "
3678 "%u now active.",
3679 num_dropped_eps, drop_flags,
3680 xhci->num_active_eps);
3681}
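
/*
 * Worked example (illustrative only): with endpoints 0 (default control),
 * 2 and 5 active and drop_control_ep == false, the loop above visits
 * i = 1..30 and builds drop_flags = (1 << 2) | (1 << 5) = 0x24, so two
 * endpoint contexts are dropped and num_active_eps shrinks by 2.
 */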
3682
3683/*
3684 * This submits a Reset Device Command, which will set the device state to 0,
3685 * set the device address to 0, and disable all the endpoints except the default
3686 * control endpoint. The USB core should come back and call
3687 * xhci_address_device(), and then re-set up the configuration. If this is
3688 * called because of a usb_reset_and_verify_device(), then the old alternate
3689 * settings will be re-installed through the normal bandwidth allocation
3690 * functions.
3691 *
3692 * Wait for the Reset Device command to finish. Remove all structures
3693 * associated with the endpoints that were disabled. Clear the input device
3694 * structure? Reset the control endpoint 0 max packet size?
3695 *
3696 * If the virt_dev to be reset does not exist or does not match the udev,
3697 * it means the device is lost, possibly due to an xHC restore error and
3698 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3699 * re-allocate the device.
3700 */
3701static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3702 struct usb_device *udev)
3703{
3704 int ret, i;
3705 unsigned long flags;
3706 struct xhci_hcd *xhci;
3707 unsigned int slot_id;
3708 struct xhci_virt_device *virt_dev;
3709 struct xhci_command *reset_device_cmd;
3710 struct xhci_slot_ctx *slot_ctx;
3711 int old_active_eps = 0;
3712
3713 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3714 if (ret <= 0)
3715 return ret;
3716 xhci = hcd_to_xhci(hcd);
3717 slot_id = udev->slot_id;
3718 virt_dev = xhci->devs[slot_id];
3719 if (!virt_dev) {
3720 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3721 "not exist. Re-allocate the device\n", slot_id);
3722 ret = xhci_alloc_dev(hcd, udev);
3723 if (ret == 1)
3724 return 0;
3725 else
3726 return -EINVAL;
3727 }
3728
3729 if (virt_dev->tt_info)
3730 old_active_eps = virt_dev->tt_info->active_eps;
3731
3732 if (virt_dev->udev != udev) {
3733 /* If the virt_dev and the udev do not match, this virt_dev
3734 * may belong to another udev.
3735 * Re-allocate the device.
3736 */
3737 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3738 "not match the udev. Re-allocate the device\n",
3739 slot_id);
3740 ret = xhci_alloc_dev(hcd, udev);
3741 if (ret == 1)
3742 return 0;
3743 else
3744 return -EINVAL;
3745 }
3746
3747 /* If the device is not set up, there is no point in resetting it */
3748 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3749 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3750 SLOT_STATE_DISABLED)
3751 return 0;
3752
3753 trace_xhci_discover_or_reset_device(slot_ctx);
3754
3755 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3756 /* Allocate the command structure that holds the struct completion.
3757 * Assume we're in process context, since the normal device reset
3758 * process has to wait for the device anyway. Storage devices are
3759 * reset as part of error handling, so use GFP_NOIO instead of
3760 * GFP_KERNEL.
3761 */
3762 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3763 if (!reset_device_cmd) {
3764 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3765 return -ENOMEM;
3766 }
3767
3768 /* Attempt to submit the Reset Device command to the command ring */
3769 spin_lock_irqsave(&xhci->lock, flags);
3770
3771 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3772 if (ret) {
3773 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3774 spin_unlock_irqrestore(&xhci->lock, flags);
3775 goto command_cleanup;
3776 }
3777 xhci_ring_cmd_db(xhci);
3778 spin_unlock_irqrestore(&xhci->lock, flags);
3779
3780 /* Wait for the Reset Device command to finish */
3781 wait_for_completion(reset_device_cmd->completion);
3782
3783 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3784 * unless we tried to reset a slot ID that wasn't enabled,
3785 * or the device wasn't in the addressed or configured state.
3786 */
3787 ret = reset_device_cmd->status;
3788 switch (ret) {
3789 case COMP_COMMAND_ABORTED:
3790 case COMP_COMMAND_RING_STOPPED:
3791 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3792 ret = -ETIME;
3793 goto command_cleanup;
3794 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3795 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3796 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3797 slot_id,
3798 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3799 xhci_dbg(xhci, "Not freeing device rings.\n");
3800 /* Don't treat this as an error. May change my mind later. */
3801 ret = 0;
3802 goto command_cleanup;
3803 case COMP_SUCCESS:
3804 xhci_dbg(xhci, "Successful reset device command.\n");
3805 break;
3806 default:
3807 if (xhci_is_vendor_info_code(xhci, ret))
3808 break;
3809 xhci_warn(xhci, "Unknown completion code %u for "
3810 "reset device command.\n", ret);
3811 ret = -EINVAL;
3812 goto command_cleanup;
3813 }
3814
3815 /* Free up host controller endpoint resources */
3816 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3817 spin_lock_irqsave(&xhci->lock, flags);
3818 /* Don't delete the default control endpoint resources */
3819 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3820 spin_unlock_irqrestore(&xhci->lock, flags);
3821 }
3822
3823 /* Everything but endpoint 0 is disabled, so free the rings. */
3824 for (i = 1; i < 31; i++) {
3825 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3826
3827 if (ep->ep_state & EP_HAS_STREAMS) {
3828 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3829 xhci_get_endpoint_address(i));
3830 xhci_free_stream_info(xhci, ep->stream_info);
3831 ep->stream_info = NULL;
3832 ep->ep_state &= ~EP_HAS_STREAMS;
3833 }
3834
3835 if (ep->ring) {
3836 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3837 xhci_free_endpoint_ring(xhci, virt_dev, i);
3838 }
3839 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3840 xhci_drop_ep_from_interval_table(xhci,
3841 &virt_dev->eps[i].bw_info,
3842 virt_dev->bw_table,
3843 udev,
3844 &virt_dev->eps[i],
3845 virt_dev->tt_info);
3846 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3847 }
3848 /* If necessary, update the number of active TTs on this root port */
3849 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3850 virt_dev->flags = 0;
3851 ret = 0;
3852
3853command_cleanup:
3854 xhci_free_command(xhci, reset_device_cmd);
3855 return ret;
3856}
3857
3858/*
3859 * At this point, the struct usb_device is about to go away, the device has
3860 * disconnected, all traffic has been stopped, and the endpoints have been
3861 * disabled. Free any HC data structures associated with that device.
3862 */
3863static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3864{
3865 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3866 struct xhci_virt_device *virt_dev;
3867 struct xhci_slot_ctx *slot_ctx;
3868 unsigned long flags;
3869 int i, ret;
3870
3871 /*
3872 * We called pm_runtime_get_noresume when the device was attached.
3873 * Decrement the counter here to allow controller to runtime suspend
3874 * if no devices remain.
3875 */
3876 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3877 pm_runtime_put_noidle(hcd->self.controller);
3878
3879 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3880 /* If the host is halted due to driver unload, we still need to free the
3881 * device.
3882 */
3883 if (ret <= 0 && ret != -ENODEV)
3884 return;
3885
3886 virt_dev = xhci->devs[udev->slot_id];
3887 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3888 trace_xhci_free_dev(slot_ctx);
3889
3890 /* Stop any wayward timer functions (which may grab the lock) */
3891 for (i = 0; i < 31; i++)
3892 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3893 virt_dev->udev = NULL;
3894 xhci_disable_slot(xhci, udev->slot_id);
3895
3896 spin_lock_irqsave(&xhci->lock, flags);
3897 xhci_free_virt_device(xhci, udev->slot_id);
3898 spin_unlock_irqrestore(&xhci->lock, flags);
3899
3900}
3901
3902int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3903{
3904 struct xhci_command *command;
3905 unsigned long flags;
3906 u32 state;
3907 int ret;
3908
3909 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3910 if (!command)
3911 return -ENOMEM;
3912
3913 xhci_debugfs_remove_slot(xhci, slot_id);
3914
3915 spin_lock_irqsave(&xhci->lock, flags);
3916 /* Don't disable the slot if the host controller is dead. */
3917 state = readl(&xhci->op_regs->status);
3918 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3919 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3920 spin_unlock_irqrestore(&xhci->lock, flags);
3921 kfree(command);
3922 return -ENODEV;
3923 }
3924
3925 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3926 slot_id);
3927 if (ret) {
3928 spin_unlock_irqrestore(&xhci->lock, flags);
3929 kfree(command);
3930 return ret;
3931 }
3932 xhci_ring_cmd_db(xhci);
3933 spin_unlock_irqrestore(&xhci->lock, flags);
3934
3935 wait_for_completion(command->completion);
3936
3937 if (command->status != COMP_SUCCESS)
3938 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
3939 slot_id, command->status);
3940
3941 xhci_free_command(xhci, command);
3942
3943 return 0;
3944}
3945
3946/*
3947 * Checks if we have enough host controller resources for the default control
3948 * endpoint.
3949 *
3950 * Must be called with xhci->lock held.
3951 */
3952static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3953{
3954 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3955 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3956 "Not enough ep ctxs: "
3957 "%u active, need to add 1, limit is %u.",
3958 xhci->num_active_eps, xhci->limit_active_eps);
3959 return -ENOMEM;
3960 }
3961 xhci->num_active_eps += 1;
3962 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3963 "Adding 1 ep ctx, %u now active.",
3964 xhci->num_active_eps);
3965 return 0;
3966}
3967
3968
3969/*
3970 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3971 * timed out, or allocating memory failed. Returns 1 on success.
3972 */
3973int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3974{
3975 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3976 struct xhci_virt_device *vdev;
3977 struct xhci_slot_ctx *slot_ctx;
3978 unsigned long flags;
3979 int ret, slot_id;
3980 struct xhci_command *command;
3981
3982 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3983 if (!command)
3984 return 0;
3985
3986 spin_lock_irqsave(&xhci->lock, flags);
3987 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
3988 if (ret) {
3989 spin_unlock_irqrestore(&xhci->lock, flags);
3990 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3991 xhci_free_command(xhci, command);
3992 return 0;
3993 }
3994 xhci_ring_cmd_db(xhci);
3995 spin_unlock_irqrestore(&xhci->lock, flags);
3996
3997 wait_for_completion(command->completion);
3998 slot_id = command->slot_id;
3999
4000 if (!slot_id || command->status != COMP_SUCCESS) {
4001 xhci_err(xhci, "Error while assigning device slot ID: %s\n",
4002 xhci_trb_comp_code_string(command->status));
4003 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4004 HCS_MAX_SLOTS(
4005 readl(&xhci->cap_regs->hcs_params1)));
4006 xhci_free_command(xhci, command);
4007 return 0;
4008 }
4009
4010 xhci_free_command(xhci, command);
4011
4012 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4013 spin_lock_irqsave(&xhci->lock, flags);
4014 ret = xhci_reserve_host_control_ep_resources(xhci);
4015 if (ret) {
4016 spin_unlock_irqrestore(&xhci->lock, flags);
4017 xhci_warn(xhci, "Not enough host resources, "
4018 "active endpoint contexts = %u\n",
4019 xhci->num_active_eps);
4020 goto disable_slot;
4021 }
4022 spin_unlock_irqrestore(&xhci->lock, flags);
4023 }
4024 /* Use GFP_NOIO, since this function can be called from
4025 * xhci_discover_or_reset_device(), which may be called as part of
4026 * mass storage driver error handling.
4027 */
4028 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4029 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4030 goto disable_slot;
4031 }
4032 vdev = xhci->devs[slot_id];
4033 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4034 trace_xhci_alloc_dev(slot_ctx);
4035
4036 udev->slot_id = slot_id;
4037
4038 xhci_debugfs_create_slot(xhci, slot_id);
4039
4040 /*
4041 * If resetting upon resume, we can't put the controller into runtime
4042 * suspend if there is a device attached.
4043 */
4044 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4045 pm_runtime_get_noresume(hcd->self.controller);
4046
4047 /* Is this a LS or FS device under a HS hub? */
4048 /* Hub or peripheral? */
4049 return 1;
4050
4051disable_slot:
4052 xhci_disable_slot(xhci, udev->slot_id);
4053 xhci_free_virt_device(xhci, udev->slot_id);
4054
4055 return 0;
4056}
4057
4058/**
4059 * xhci_setup_device - issues an Address Device command to assign a unique
4060 * USB bus address.
4061 * @hcd: USB host controller data structure.
4062 * @udev: USB dev structure representing the connected device.
4063 * @setup: Enum specifying setup mode: address only or with context.
4064 * @timeout_ms: Max wait time (ms) for the command operation to complete.
4065 *
4066 * Return: 0 if successful; otherwise, negative error code.
4067 */
4068static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4069 enum xhci_setup_dev setup, unsigned int timeout_ms)
4070{
4071 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4072 unsigned long flags;
4073 struct xhci_virt_device *virt_dev;
4074 int ret = 0;
4075 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4076 struct xhci_slot_ctx *slot_ctx;
4077 struct xhci_input_control_ctx *ctrl_ctx;
4078 u64 temp_64;
4079 struct xhci_command *command = NULL;
4080
4081 mutex_lock(&xhci->mutex);
4082
4083 if (xhci->xhc_state) { /* dying, removing or halted */
4084 ret = -ESHUTDOWN;
4085 goto out;
4086 }
4087
4088 if (!udev->slot_id) {
4089 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4090 "Bad Slot ID %d", udev->slot_id);
4091 ret = -EINVAL;
4092 goto out;
4093 }
4094
4095 virt_dev = xhci->devs[udev->slot_id];
4096
4097 if (WARN_ON(!virt_dev)) {
4098 /*
4099 * In plug/unplug torture test with an NEC controller,
4100 * a zero-dereference was observed once due to virt_dev = 0.
4101 * Print useful debug rather than crash if it is observed again!
4102 */
4103 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4104 udev->slot_id);
4105 ret = -EINVAL;
4106 goto out;
4107 }
4108 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4109 trace_xhci_setup_device_slot(slot_ctx);
4110
4111 if (setup == SETUP_CONTEXT_ONLY) {
4112 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4113 SLOT_STATE_DEFAULT) {
4114 xhci_dbg(xhci, "Slot already in default state\n");
4115 goto out;
4116 }
4117 }
4118
4119 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4120 if (!command) {
4121 ret = -ENOMEM;
4122 goto out;
4123 }
4124
4125 command->in_ctx = virt_dev->in_ctx;
4126 command->timeout_ms = timeout_ms;
4127
4128 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4129 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4130 if (!ctrl_ctx) {
4131 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4132 __func__);
4133 ret = -EINVAL;
4134 goto out;
4135 }
4136 /*
4137 * If this is the first Set Address since device plug-in or
4138 * virt_device reallocation after a resume with an xHCI power loss,
4139 * then set up the slot context.
4140 */
4141 if (!slot_ctx->dev_info)
4142 xhci_setup_addressable_virt_dev(xhci, udev);
4143 /* Otherwise, update the control endpoint ring enqueue pointer. */
4144 else
4145 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4146 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4147 ctrl_ctx->drop_flags = 0;
4148
4149 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4150 le32_to_cpu(slot_ctx->dev_info) >> 27);
4151
4152 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4153 spin_lock_irqsave(&xhci->lock, flags);
4154 trace_xhci_setup_device(virt_dev);
4155 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4156 udev->slot_id, setup);
4157 if (ret) {
4158 spin_unlock_irqrestore(&xhci->lock, flags);
4159 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4160 "FIXME: allocate a command ring segment");
4161 goto out;
4162 }
4163 xhci_ring_cmd_db(xhci);
4164 spin_unlock_irqrestore(&xhci->lock, flags);
4165
4166 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4167 wait_for_completion(command->completion);
4168
4169 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4170 * the SetAddress() "recovery interval" required by USB and aborting the
4171 * command on a timeout."
4172 */
4173 switch (command->status) {
4174 case COMP_COMMAND_ABORTED:
4175 case COMP_COMMAND_RING_STOPPED:
4176 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4177 ret = -ETIME;
4178 break;
4179 case COMP_CONTEXT_STATE_ERROR:
4180 case COMP_SLOT_NOT_ENABLED_ERROR:
4181 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4182 act, udev->slot_id);
4183 ret = -EINVAL;
4184 break;
4185 case COMP_USB_TRANSACTION_ERROR:
4186 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4187
4188 mutex_unlock(&xhci->mutex);
4189 ret = xhci_disable_slot(xhci, udev->slot_id);
4190 xhci_free_virt_device(xhci, udev->slot_id);
4191 if (!ret)
4192 xhci_alloc_dev(hcd, udev);
4193 kfree(command->completion);
4194 kfree(command);
4195 return -EPROTO;
4196 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4197 dev_warn(&udev->dev,
4198 "ERROR: Incompatible device for setup %s command\n", act);
4199 ret = -ENODEV;
4200 break;
4201 case COMP_SUCCESS:
4202 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4203 "Successful setup %s command", act);
4204 break;
4205 default:
4206 xhci_err(xhci,
4207 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4208 act, command->status);
4209 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4210 ret = -EINVAL;
4211 break;
4212 }
4213 if (ret)
4214 goto out;
4215 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4216 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4217 "Op regs DCBAA ptr = %#016llx", temp_64);
4218 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4219 "Slot ID %d dcbaa entry @%p = %#016llx",
4220 udev->slot_id,
4221 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4222 (unsigned long long)
4223 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4224 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4225 "Output Context DMA address = %#08llx",
4226 (unsigned long long)virt_dev->out_ctx->dma);
4227 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4228 le32_to_cpu(slot_ctx->dev_info) >> 27);
4229 /*
4230 * USB core uses address 1 for the roothubs, so we add one to the
4231 * address given back to us by the HC.
4232 */
4233 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4234 le32_to_cpu(slot_ctx->dev_info) >> 27);
4235 /* Zero the input context control for later use */
4236 ctrl_ctx->add_flags = 0;
4237 ctrl_ctx->drop_flags = 0;
4238 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4239 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4240
4241 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4242 "Internal device address = %d",
4243 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4244out:
4245 mutex_unlock(&xhci->mutex);
4246 if (command) {
4247 kfree(command->completion);
4248 kfree(command);
4249 }
4250 return ret;
4251}
4252
4253static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
4254 unsigned int timeout_ms)
4255{
4256 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
4257}
4258
4259static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4260{
4261 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
4262 XHCI_CMD_DEFAULT_TIMEOUT);
4263}
4264
4265/*
4266 * Translate a root hub port number into the raw index used by the HW port
4267 * status registers: take the offset between the port's PORTSC register and
4268 * the port register base, then divide by the number of registers per port.
4269 * Raw port numbers start at 1.
4270 */
4271int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4272{
4273 struct xhci_hub *rhub;
4274
4275 rhub = xhci_get_rhub(hcd);
4276 return rhub->ports[port1 - 1]->hw_portnum + 1;
4277}
4278
4279/*
4280 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4281 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4282 */
4283static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4284 struct usb_device *udev, u16 max_exit_latency)
4285{
4286 struct xhci_virt_device *virt_dev;
4287 struct xhci_command *command;
4288 struct xhci_input_control_ctx *ctrl_ctx;
4289 struct xhci_slot_ctx *slot_ctx;
4290 unsigned long flags;
4291 int ret;
4292
4293 command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4294 if (!command)
4295 return -ENOMEM;
4296
4297 spin_lock_irqsave(&xhci->lock, flags);
4298
4299 virt_dev = xhci->devs[udev->slot_id];
4300
4301 /*
4302 * virt_dev might not exist yet if the xHC resumed from hibernate (S4) and
4303 * xHC was re-initialized. Exit latency will be set later after
4304 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4305 */
4306
4307 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4308 spin_unlock_irqrestore(&xhci->lock, flags);
4309 xhci_free_command(xhci, command);
4310 return 0;
4311 }
4312
4313 /* Attempt to issue an Evaluate Context command to change the MEL. */
4314 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4315 if (!ctrl_ctx) {
4316 spin_unlock_irqrestore(&xhci->lock, flags);
4317 xhci_free_command(xhci, command);
4318 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4319 __func__);
4320 return -ENOMEM;
4321 }
4322
4323 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4324 spin_unlock_irqrestore(&xhci->lock, flags);
4325
4326 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4327 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4328 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4329 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4330 slot_ctx->dev_state = 0;
4331
4332 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4333 "Set up evaluate context for LPM MEL change.");
4334
4335 /* Issue and wait for the evaluate context command. */
4336 ret = xhci_configure_endpoint(xhci, udev, command,
4337 true, true);
4338
4339 if (!ret) {
4340 spin_lock_irqsave(&xhci->lock, flags);
4341 virt_dev->current_mel = max_exit_latency;
4342 spin_unlock_irqrestore(&xhci->lock, flags);
4343 }
4344
4345 xhci_free_command(xhci, command);
4346
4347 return ret;
4348}
4349
4350#ifdef CONFIG_PM
4351
4352/* BESL to HIRD Encoding array for USB2 LPM */
4353static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4354 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4355
4356/* Calculate HIRD/BESL for USB2 PORTPMSC */
4357static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4358 struct usb_device *udev)
4359{
4360 int u2del, besl, besl_host;
4361 int besl_device = 0;
4362 u32 field;
4363
4364 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4365 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4366
4367 if (field & USB_BESL_SUPPORT) {
4368 for (besl_host = 0; besl_host < 16; besl_host++) {
4369 if (xhci_besl_encoding[besl_host] >= u2del)
4370 break;
4371 }
4372 /* Use baseline BESL value as default */
4373 if (field & USB_BESL_BASELINE_VALID)
4374 besl_device = USB_GET_BESL_BASELINE(field);
4375 else if (field & USB_BESL_DEEP_VALID)
4376 besl_device = USB_GET_BESL_DEEP(field);
4377 } else {
4378 if (u2del <= 50)
4379 besl_host = 0;
4380 else
4381 besl_host = (u2del - 51) / 75 + 1;
4382 }
4383
4384 besl = besl_host + besl_device;
4385 if (besl > 15)
4386 besl = 15;
4387
4388 return besl;
4389}
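
/*
 * Worked example (illustrative only): for a host with u2del = 300 us, a
 * BESL-capable device stops the table scan at besl_host = 3, since
 * xhci_besl_encoding[3] == 300; a non-BESL device instead gets
 * besl_host = (300 - 51) / 75 + 1 = 4.
 */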
4390
4391/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4392static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4393{
4394 u32 field;
4395 int l1;
4396 int besld = 0;
4397 int hirdm = 0;
4398
4399 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4400
4401 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4402 l1 = udev->l1_params.timeout / 256;
4403
4404 /* device has preferred BESLD */
4405 if (field & USB_BESL_DEEP_VALID) {
4406 besld = USB_GET_BESL_DEEP(field);
4407 hirdm = 1;
4408 }
4409
4410 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4411}
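
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): mirrors
 * the PORTHLPMC encoding above for a device that advertises a valid Deep
 * BESL value, with the USB_BESL_DEEP_VALID check assumed done by the caller:
 */
static u32 __maybe_unused example_porthlpmc(int timeout_us, int besld)
{
	int l1 = timeout_us / 256;	/* e.g. 512 us -> 2 */

	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(1);
}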
4412
4413static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4414 struct usb_device *udev, int enable)
4415{
4416 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4417 struct xhci_port **ports;
4418 __le32 __iomem *pm_addr, *hlpm_addr;
4419 u32 pm_val, hlpm_val, field;
4420 unsigned int port_num;
4421 unsigned long flags;
4422 int hird, exit_latency;
4423 int ret;
4424
4425 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4426 return -EPERM;
4427
4428 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4429 !udev->lpm_capable)
4430 return -EPERM;
4431
4432 if (!udev->parent || udev->parent->parent ||
4433 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4434 return -EPERM;
4435
4436 if (udev->usb2_hw_lpm_capable != 1)
4437 return -EPERM;
4438
4439 spin_lock_irqsave(&xhci->lock, flags);
4440
4441 ports = xhci->usb2_rhub.ports;
4442 port_num = udev->portnum - 1;
4443 pm_addr = ports[port_num]->addr + PORTPMSC;
4444 pm_val = readl(pm_addr);
4445 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4446
4447 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4448 enable ? "enable" : "disable", port_num + 1);
4449
4450 if (enable) {
4451 /* Host supports BESL timeout instead of HIRD */
4452 if (udev->usb2_hw_lpm_besl_capable) {
4453 /* if the device doesn't have a preferred BESL value, use a
4454 * default one which works with mixed HIRD and BESL
4455 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4456 */
4457 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4458 if ((field & USB_BESL_SUPPORT) &&
4459 (field & USB_BESL_BASELINE_VALID))
4460 hird = USB_GET_BESL_BASELINE(field);
4461 else
4462 hird = udev->l1_params.besl;
4463
4464 exit_latency = xhci_besl_encoding[hird];
4465 spin_unlock_irqrestore(&xhci->lock, flags);
4466
4467 ret = xhci_change_max_exit_latency(xhci, udev,
4468 exit_latency);
4469 if (ret < 0)
4470 return ret;
4471 spin_lock_irqsave(&xhci->lock, flags);
4472
4473 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4474 writel(hlpm_val, hlpm_addr);
4475 /* flush write */
4476 readl(hlpm_addr);
4477 } else {
4478 hird = xhci_calculate_hird_besl(xhci, udev);
4479 }
4480
4481 pm_val &= ~PORT_HIRD_MASK;
4482 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4483 writel(pm_val, pm_addr);
4484 pm_val = readl(pm_addr);
4485 pm_val |= PORT_HLE;
4486 writel(pm_val, pm_addr);
4487 /* flush write */
4488 readl(pm_addr);
4489 } else {
4490 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4491 writel(pm_val, pm_addr);
4492 /* flush write */
4493 readl(pm_addr);
4494 if (udev->usb2_hw_lpm_besl_capable) {
4495 spin_unlock_irqrestore(&xhci->lock, flags);
4496 xhci_change_max_exit_latency(xhci, udev, 0);
4497 readl_poll_timeout(ports[port_num]->addr, pm_val,
4498 (pm_val & PORT_PLS_MASK) == XDEV_U0,
4499 100, 10000);
4500 return 0;
4501 }
4502 }
4503
4504 spin_unlock_irqrestore(&xhci->lock, flags);
4505 return 0;
4506}
4507
4508/* Check if a USB2 port supports a given extended capability protocol.
4509 * Only USB2 ports' extended protocol capability values are cached.
4510 * Return 1 if the capability is supported.
4511 */
4512static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4513 unsigned capability)
4514{
4515 u32 port_offset, port_count;
4516 int i;
4517
4518 for (i = 0; i < xhci->num_ext_caps; i++) {
4519 if (xhci->ext_caps[i] & capability) {
4520 /* port offsets starts at 1 */
4521 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4522 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4523 if (port >= port_offset &&
4524 port < port_offset + port_count)
4525 return 1;
4526 }
4527 }
4528 return 0;
4529}
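
/*
 * Worked example (illustrative only): an extended capability reporting a
 * port offset of 5 and a port count of 4 covers the 0-based ports 4..7,
 * so a device on (0-based) port 6 matches and the function returns 1.
 */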
4530
4531static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4532{
4533 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4534 int portnum = udev->portnum - 1;
4535
4536 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4537 return 0;
4538
4539 /* So far we only support LPM for non-hub devices connected directly to the root hub */
4540 if (!udev->parent || udev->parent->parent ||
4541 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4542 return 0;
4543
4544 if (xhci->hw_lpm_support == 1 &&
4545 xhci_check_usb2_port_capability(
4546 xhci, portnum, XHCI_HLC)) {
4547 udev->usb2_hw_lpm_capable = 1;
4548 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4549 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4550 if (xhci_check_usb2_port_capability(xhci, portnum,
4551 XHCI_BLC))
4552 udev->usb2_hw_lpm_besl_capable = 1;
4553 }
4554
4555 return 0;
4556}
4557
4558/*---------------------- USB 3.0 Link PM functions ------------------------*/
4559
4560/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4561static unsigned long long xhci_service_interval_to_ns(
4562 struct usb_endpoint_descriptor *desc)
4563{
4564 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4565}
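
/*
 * Worked example (illustrative sketch, not part of the driver): an endpoint
 * with bInterval = 4 has a service interval of 2^(4 - 1) * 125 us = 1 ms:
 */
static unsigned long long __maybe_unused example_esit_1ms(void)
{
	struct usb_endpoint_descriptor desc = { .bInterval = 4 };

	return xhci_service_interval_to_ns(&desc);	/* 1000000 ns */
}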
4566
4567static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4568 enum usb3_link_state state)
4569{
4570 unsigned long long sel;
4571 unsigned long long pel;
4572 unsigned int max_sel_pel;
4573 char *state_name;
4574
4575 switch (state) {
4576 case USB3_LPM_U1:
4577 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4578 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4579 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4580 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4581 state_name = "U1";
4582 break;
4583 case USB3_LPM_U2:
4584 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4585 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4586 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4587 state_name = "U2";
4588 break;
4589 default:
4590 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4591 __func__);
4592 return USB3_LPM_DISABLED;
4593 }
4594
4595 if (sel <= max_sel_pel && pel <= max_sel_pel)
4596 return USB3_LPM_DEVICE_INITIATED;
4597
4598 if (sel > max_sel_pel)
4599 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4600 "due to long SEL %llu ms\n",
4601 state_name, sel);
4602 else
4603 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4604 "due to long PEL %llu ms\n",
4605 state_name, pel);
4606 return USB3_LPM_DISABLED;
4607}
4608
4609/* The U1 timeout should be the maximum of the following values:
4610 * - For control endpoints, U1 system exit latency (SEL) * 3
4611 * - For bulk endpoints, U1 SEL * 5
4612 * - For interrupt endpoints:
4613 * - Notification EPs, U1 SEL * 3
4614 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4615 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4616 */
4617static unsigned long long xhci_calculate_intel_u1_timeout(
4618 struct usb_device *udev,
4619 struct usb_endpoint_descriptor *desc)
4620{
4621 unsigned long long timeout_ns;
4622 int ep_type;
4623 int intr_type;
4624
4625 ep_type = usb_endpoint_type(desc);
4626 switch (ep_type) {
4627 case USB_ENDPOINT_XFER_CONTROL:
4628 timeout_ns = udev->u1_params.sel * 3;
4629 break;
4630 case USB_ENDPOINT_XFER_BULK:
4631 timeout_ns = udev->u1_params.sel * 5;
4632 break;
4633 case USB_ENDPOINT_XFER_INT:
4634 intr_type = usb_endpoint_interrupt_type(desc);
4635 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4636 timeout_ns = udev->u1_params.sel * 3;
4637 break;
4638 }
4639 /* Otherwise the calculation is the same as isoc eps */
4640 fallthrough;
4641 case USB_ENDPOINT_XFER_ISOC:
4642 timeout_ns = xhci_service_interval_to_ns(desc);
4643 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4644 if (timeout_ns < udev->u1_params.sel * 2)
4645 timeout_ns = udev->u1_params.sel * 2;
4646 break;
4647 default:
4648 return 0;
4649 }
4650
4651 return timeout_ns;
4652}
4653
4654/* Returns the hub-encoded U1 timeout value. */
4655static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4656 struct usb_device *udev,
4657 struct usb_endpoint_descriptor *desc)
4658{
4659 unsigned long long timeout_ns;
4660
4661 /* Prevent U1 if service interval is shorter than U1 exit latency */
4662 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4663 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4664 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4665 return USB3_LPM_DISABLED;
4666 }
4667 }
4668
4669 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4670 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4671 else
4672 timeout_ns = udev->u1_params.sel;
4673
4674 /* The U1 timeout is encoded in 1us intervals.
4675 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4676 */
4677 if (timeout_ns == USB3_LPM_DISABLED)
4678 timeout_ns = 1;
4679 else
4680 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4681
4682 /* If the necessary timeout value is bigger than what we can set in the
4683 * USB 3.0 hub, we have to disable hub-initiated U1.
4684 */
4685 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4686 return timeout_ns;
4687 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4688 "due to long timeout %llu ms\n", timeout_ns);
4689 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4690}
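
/*
 * Worked example (illustrative only): a bulk endpoint on an Intel host with
 * U1 SEL = 50 us (50000 ns) gets timeout_ns = 5 * 50000 = 250000 ns, i.e.
 * 250 us after the division above. That exceeds USB3_LPM_U1_MAX_TIMEOUT
 * (127 us), so hub-initiated U1 is disabled and
 * xhci_get_timeout_no_hub_lpm() decides whether device-initiated U1 stays
 * available.
 */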
4691
4692/* The U2 timeout should be the maximum of:
4693 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4694 * - largest bInterval of any active periodic endpoint (to avoid going
4695 * into lower power link states between intervals).
4696 * - the U2 Exit Latency of the device
4697 */
4698static unsigned long long xhci_calculate_intel_u2_timeout(
4699 struct usb_device *udev,
4700 struct usb_endpoint_descriptor *desc)
4701{
4702 unsigned long long timeout_ns;
4703 unsigned long long u2_del_ns;
4704
4705 timeout_ns = 10 * 1000 * 1000;
4706
4707 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4708 (xhci_service_interval_to_ns(desc) > timeout_ns))
4709 timeout_ns = xhci_service_interval_to_ns(desc);
4710
4711 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4712 if (u2_del_ns > timeout_ns)
4713 timeout_ns = u2_del_ns;
4714
4715 return timeout_ns;
4716}
4717
4718/* Returns the hub-encoded U2 timeout value. */
4719static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4720 struct usb_device *udev,
4721 struct usb_endpoint_descriptor *desc)
4722{
4723 unsigned long long timeout_ns;
4724
4725 /* Prevent U2 if service interval is shorter than U2 exit latency */
4726 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4727 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4728 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4729 return USB3_LPM_DISABLED;
4730 }
4731 }
4732
4733 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4734 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4735 else
4736 timeout_ns = udev->u2_params.sel;
4737
4738 /* The U2 timeout is encoded in 256us intervals */
4739 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4740 /* If the necessary timeout value is bigger than what we can set in the
4741 * USB 3.0 hub, we have to disable hub-initiated U2.
4742 */
4743 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4744 return timeout_ns;
4745 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4746 "due to long timeout %llu ms\n", timeout_ns);
4747 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4748}
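
/*
 * Worked example (illustrative only): a 10 ms timeout encodes as
 * DIV_ROUND_UP(10000000, 256000) = 40 intervals of 256 us, well under
 * USB3_LPM_U2_MAX_TIMEOUT (0xFE), so the hub-encoded U2 timeout is 40.
 */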
4749
4750static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4751 struct usb_device *udev,
4752 struct usb_endpoint_descriptor *desc,
4753 enum usb3_link_state state,
4754 u16 *timeout)
4755{
4756 if (state == USB3_LPM_U1)
4757 return xhci_calculate_u1_timeout(xhci, udev, desc);
4758 else if (state == USB3_LPM_U2)
4759 return xhci_calculate_u2_timeout(xhci, udev, desc);
4760
4761 return USB3_LPM_DISABLED;
4762}
4763
4764static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4765 struct usb_device *udev,
4766 struct usb_endpoint_descriptor *desc,
4767 enum usb3_link_state state,
4768 u16 *timeout)
4769{
4770 u16 alt_timeout;
4771
4772 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4773 desc, state, timeout);
4774
4775 /* If we found we can't enable hub-initiated LPM, and
4776 * the U1 or U2 exit latency was too high to allow
4777 * device-initiated LPM as well, then we will disable LPM
4778 * for this device, so stop searching any further.
4779 */
4780 if (alt_timeout == USB3_LPM_DISABLED) {
4781 *timeout = alt_timeout;
4782 return -E2BIG;
4783 }
4784 if (alt_timeout > *timeout)
4785 *timeout = alt_timeout;
4786 return 0;
4787}
4788
4789static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4790 struct usb_device *udev,
4791 struct usb_host_interface *alt,
4792 enum usb3_link_state state,
4793 u16 *timeout)
4794{
4795 int j;
4796
4797 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4798 if (xhci_update_timeout_for_endpoint(xhci, udev,
4799 &alt->endpoint[j].desc, state, timeout))
4800 return -E2BIG;
4801 }
4802 return 0;
4803}
4804
4805static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4806 struct usb_device *udev,
4807 enum usb3_link_state state)
4808{
4809 struct usb_device *parent = udev->parent;
4810 int tier = 1; /* roothub is tier1 */
4811
4812 while (parent) {
4813 parent = parent->parent;
4814 tier++;
4815 }
4816
4817 if (xhci->quirks & XHCI_INTEL_HOST && tier > 3)
4818 goto fail;
4819 if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2)
4820 goto fail;
4821
4822 return 0;
4823fail:
4824 dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n",
4825 tier);
4826 return -E2BIG;
4827}
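
/*
 * Worked example (illustrative only): a device plugged directly into the
 * root hub ends the loop above with tier == 2; one external hub in between
 * gives tier == 3, which the Intel policy still allows but the Zhaoxin
 * policy (tier > 2) rejects.
 */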
4828
4829/* Returns the U1 or U2 timeout that should be enabled.
4830 * If the tier check or timeout setting functions return with a non-zero exit
4831 * code, that means the timeout value has been finalized and we shouldn't look
4832 * at any more endpoints.
4833 */
4834static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4835 struct usb_device *udev, enum usb3_link_state state)
4836{
4837 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4838 struct usb_host_config *config;
4839 char *state_name;
4840 int i;
4841 u16 timeout = USB3_LPM_DISABLED;
4842
4843 if (state == USB3_LPM_U1)
4844 state_name = "U1";
4845 else if (state == USB3_LPM_U2)
4846 state_name = "U2";
4847 else {
4848 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4849 state);
4850 return timeout;
4851 }
4852
4853 /* Gather some information about the currently installed configuration
4854 * and alternate interface settings.
4855 */
4856 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4857 state, &timeout))
4858 return timeout;
4859
4860 config = udev->actconfig;
4861 if (!config)
4862 return timeout;
4863
4864 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4865 struct usb_driver *driver;
4866 struct usb_interface *intf = config->interface[i];
4867
4868 if (!intf)
4869 continue;
4870
4871 /* Check if any currently bound drivers want hub-initiated LPM
4872 * disabled.
4873 */
4874 if (intf->dev.driver) {
4875 driver = to_usb_driver(intf->dev.driver);
4876 if (driver && driver->disable_hub_initiated_lpm) {
4877 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4878 state_name, driver->name);
4879 timeout = xhci_get_timeout_no_hub_lpm(udev,
4880 state);
4881 if (timeout == USB3_LPM_DISABLED)
4882 return timeout;
4883 }
4884 }
4885
4886 /* Not sure how this could happen... */
4887 if (!intf->cur_altsetting)
4888 continue;
4889
4890 if (xhci_update_timeout_for_interface(xhci, udev,
4891 intf->cur_altsetting,
4892 state, &timeout))
4893 return timeout;
4894 }
4895 return timeout;
4896}
4897
4898static int calculate_max_exit_latency(struct usb_device *udev,
4899 enum usb3_link_state state_changed,
4900 u16 hub_encoded_timeout)
4901{
4902 unsigned long long u1_mel_us = 0;
4903 unsigned long long u2_mel_us = 0;
4904 unsigned long long mel_us = 0;
4905 bool disabling_u1;
4906 bool disabling_u2;
4907 bool enabling_u1;
4908 bool enabling_u2;
4909
4910 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4911 hub_encoded_timeout == USB3_LPM_DISABLED);
4912 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4913 hub_encoded_timeout == USB3_LPM_DISABLED);
4914
4915 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4916 hub_encoded_timeout != USB3_LPM_DISABLED);
4917 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4918 hub_encoded_timeout != USB3_LPM_DISABLED);
4919
4920 /* If U1 was already enabled and we're not disabling it,
4921 * or we're going to enable U1, account for the U1 max exit latency.
4922 */
4923 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4924 enabling_u1)
4925 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4926 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4927 enabling_u2)
4928 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4929
4930 mel_us = max(u1_mel_us, u2_mel_us);
4931
4932 /* xHCI host controller max exit latency field is only 16 bits wide. */
4933 if (mel_us > MAX_EXIT) {
4934 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4935 "is too big.\n", mel_us);
4936 return -E2BIG;
4937 }
4938 return mel_us;
4939}
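
/*
 * Worked example (illustrative only): enabling U2 with u2_params.mel =
 * 2300 ns while U1 stays disabled gives
 * u2_mel_us = DIV_ROUND_UP(2300, 1000) = 3, so mel_us = 3 and a MEL of
 * 3 us is programmed via xhci_change_max_exit_latency().
 */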
4940
4941/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4942static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4943 struct usb_device *udev, enum usb3_link_state state)
4944{
4945 struct xhci_hcd *xhci;
4946 struct xhci_port *port;
4947 u16 hub_encoded_timeout;
4948 int mel;
4949 int ret;
4950
4951 xhci = hcd_to_xhci(hcd);
4952 /* The LPM timeout values are pretty host-controller specific, so don't
4953 * enable hub-initiated timeouts unless the vendor has provided
4954 * information about their timeout algorithm.
4955 */
4956 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4957 !xhci->devs[udev->slot_id])
4958 return USB3_LPM_DISABLED;
4959
4960 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4961 return USB3_LPM_DISABLED;
4962
4963 /* If connected directly to a root port, check that the port can handle LPM */
4964 if (udev->parent && !udev->parent->parent) {
4965 port = xhci->usb3_rhub.ports[udev->portnum - 1];
4966 if (port->lpm_incapable)
4967 return USB3_LPM_DISABLED;
4968 }
4969
4970 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4971 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4972 if (mel < 0) {
4973 /* Max Exit Latency is too big, disable LPM. */
4974 hub_encoded_timeout = USB3_LPM_DISABLED;
4975 mel = 0;
4976 }
4977
4978 ret = xhci_change_max_exit_latency(xhci, udev, mel);
4979 if (ret)
4980 return ret;
4981 return hub_encoded_timeout;
4982}
4983
4984static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4985 struct usb_device *udev, enum usb3_link_state state)
4986{
4987 struct xhci_hcd *xhci;
4988 u16 mel;
4989
4990 xhci = hcd_to_xhci(hcd);
4991 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4992 !xhci->devs[udev->slot_id])
4993 return 0;
4994
4995 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4996 return xhci_change_max_exit_latency(xhci, udev, mel);
4997}
4998#else /* CONFIG_PM */
4999
5000static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5001 struct usb_device *udev, int enable)
5002{
5003 return 0;
5004}
5005
5006static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5007{
5008 return 0;
5009}
5010
5011static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5012 struct usb_device *udev, enum usb3_link_state state)
5013{
5014 return USB3_LPM_DISABLED;
5015}
5016
5017static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5018 struct usb_device *udev, enum usb3_link_state state)
5019{
5020 return 0;
5021}
5022#endif /* CONFIG_PM */
5023
5024/*-------------------------------------------------------------------------*/
5025
5026/* Once a hub descriptor is fetched for a device, we need to update the xHC's
5027 * internal data structures for the device.
5028 */
5029int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5030 struct usb_tt *tt, gfp_t mem_flags)
5031{
5032 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5033 struct xhci_virt_device *vdev;
5034 struct xhci_command *config_cmd;
5035 struct xhci_input_control_ctx *ctrl_ctx;
5036 struct xhci_slot_ctx *slot_ctx;
5037 unsigned long flags;
5038 unsigned think_time;
5039 int ret;
5040
5041 /* Ignore root hubs */
5042 if (!hdev->parent)
5043 return 0;
5044
5045 vdev = xhci->devs[hdev->slot_id];
5046 if (!vdev) {
5047 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5048 return -EINVAL;
5049 }
5050
5051 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5052 if (!config_cmd)
5053 return -ENOMEM;
5054
5055 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5056 if (!ctrl_ctx) {
5057 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5058 __func__);
5059 xhci_free_command(xhci, config_cmd);
5060 return -ENOMEM;
5061 }
5062
5063 spin_lock_irqsave(&xhci->lock, flags);
5064 if (hdev->speed == USB_SPEED_HIGH &&
5065 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5066 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5067 xhci_free_command(xhci, config_cmd);
5068 spin_unlock_irqrestore(&xhci->lock, flags);
5069 return -ENOMEM;
5070 }
5071
5072 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5073 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5074 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5075 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5076 /*
5077 * refer to section 6.2.2: MTT should be 0 for a full speed hub,
5078 * but it may already be set to 1 when setting up an xHCI virtual
5079 * device, so clear it anyway.
5080 */
5081 if (tt->multi)
5082 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5083 else if (hdev->speed == USB_SPEED_FULL)
5084 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5085
5086 if (xhci->hci_version > 0x95) {
5087 xhci_dbg(xhci, "xHCI version %x needs hub "
5088 "TT think time and number of ports\n",
5089 (unsigned int) xhci->hci_version);
5090 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5091 /* Set TT think time - convert from ns to FS bit times.
5092 * 0 = 8 FS bit times, 1 = 16 FS bit times,
5093 * 2 = 24 FS bit times, 3 = 32 FS bit times.
5094 *
5095 * xHCI 1.0: this field shall be 0 if the device is not a
5096 * High-speed hub.
5097 */
5098 think_time = tt->think_time;
5099 if (think_time != 0)
5100 think_time = (think_time / 666) - 1;
5101 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5102 slot_ctx->tt_info |=
5103 cpu_to_le32(TT_THINK_TIME(think_time));
5104 } else {
5105 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
5106 "TT think time or number of ports\n",
5107 (unsigned int) xhci->hci_version);
5108 }
5109 slot_ctx->dev_state = 0;
5110 spin_unlock_irqrestore(&xhci->lock, flags);
5111
5112 xhci_dbg(xhci, "Set up %s for hub device.\n",
5113 (xhci->hci_version > 0x95) ?
5114 "configure endpoint" : "evaluate context");
5115
5116 /* Issue and wait for the configure endpoint or
5117 * evaluate context command.
5118 */
5119 if (xhci->hci_version > 0x95)
5120 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5121 false, false);
5122 else
5123 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5124 true, false);
5125
5126 xhci_free_command(xhci, config_cmd);
5127 return ret;
5128}
5129EXPORT_SYMBOL_GPL(xhci_update_hub_device);
5130
5131static int xhci_get_frame(struct usb_hcd *hcd)
5132{
5133 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5134 /* EHCI mods by the periodic size. Why? */
5135 return readl(&xhci->run_regs->microframe_index) >> 3;
5136}
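
/*
 * Worked example (illustrative only): there are 8 microframes per 1 ms
 * frame, so a microframe_index of 419 yields frame number 419 >> 3 = 52.
 */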
5137
static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	xhci->usb2_rhub.hcd = hcd;
	hcd->speed = HCD_USB2;
	hcd->self.root_hub->speed = USB_SPEED_HIGH;
	/*
	 * A USB 2.0 roothub under xHCI has an integrated TT (rate
	 * matching hub), as opposed to having an OHCI/UHCI companion
	 * controller.
	 */
	hcd->has_tt = 1;
}

static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	unsigned int minor_rev;

	/*
	 * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
	 * should return 0x31 for sbrn, or that the minor revision
	 * is a two digit BCD containing minor and sub-minor numbers.
	 * This was later clarified in xHCI 1.2.
	 *
	 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
	 * minor revision set to 0x1 instead of 0x10.
	 */
	if (xhci->usb3_rhub.min_rev == 0x1)
		minor_rev = 1;
	else
		minor_rev = xhci->usb3_rhub.min_rev / 0x10;

	switch (minor_rev) {
	case 2:
		hcd->speed = HCD_USB32;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->rx_lanes = 2;
		hcd->self.root_hub->tx_lanes = 2;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
		break;
	case 1:
		hcd->speed = HCD_USB31;
		hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
		break;
	}
	xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
		  minor_rev, minor_rev ? "Enhanced " : "");

	xhci->usb3_rhub.hcd = hcd;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device *dev = hcd->self.sysdev;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* support building packets from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* xHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

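	/*
	 * The shared (non-primary) hcd reuses the capability registers and
	 * parameters cached below by the primary hcd, so it only needs its
	 * USB3 roothub data set up.
	 */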
	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_hcd_init_usb3_data(xhci, hcd);
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->main_hcd = hcd;
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
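	/* HCCPARAMS2 only exists on xHCI 1.1 and newer hosts */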
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	/* xhci-plat or xhci-pci might have set max_interrupters already */
	if (!xhci->max_interrupters ||
	    xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
		xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

	xhci->quirks |= quirks;

	if (get_quirks)
		get_quirks(dev, xhci);

	/*
	 * xHCI controllers that follow the xHCI 1.0 spec may give a spurious
	 * success event after a short transfer. This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_zero_64b_regs(xhci);

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1, even though the xHC doesn't actually
	 * support 64-bit memory address pointers. Clear the AC64 bit of
	 * xhci->hcc_params here so that the code below falls back to
	 * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)).
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/*
	 * Set dma_mask and coherent_dma_mask to 64 bits,
	 * if the xHC supports 64-bit addressing.
	 */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	if (xhci_hcd_is_usb3(hcd))
		xhci_hcd_init_usb3_data(xhci, hcd);
	else
		xhci_hcd_init_usb2_data(xhci, hcd);

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

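/*
 * Called by the hub driver once it has finished clearing a TT buffer on
 * this endpoint's behalf; drop the EP_CLEARING_TT flag and kick the
 * endpoint's rings so queued transfers can make progress again.
 */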
static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
					  struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
				HCD_BH,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.map_urb_for_dma =	xhci_map_urb_for_dma,
	.unmap_urb_for_dma =	xhci_unmap_urb_for_dma,
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_disable =	xhci_endpoint_disable,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
	.get_resuming_ports =	xhci_get_resuming_ports,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
	.clear_tt_buffer_complete =	xhci_clear_tt_buffer_complete,
};

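/*
 * xhci_init_driver - populate an hc_driver from the generic template.
 *
 * Glue drivers (xhci-pci, xhci-plat, etc.) start from xhci_hc_driver and
 * patch in their bus-specific callbacks via xhci_driver_overrides. An
 * illustrative sketch (hypothetical names, not taken from a real glue
 * driver):
 *
 *	static const struct xhci_driver_overrides my_overrides = {
 *		.extra_priv_size = sizeof(struct my_priv),
 *		.reset = my_setup,	(installed as drv->reset)
 *	};
 *
 *	xhci_init_driver(&my_hc_driver, &my_overrides);
 */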
void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv, then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
		if (over->add_endpoint)
			drv->add_endpoint = over->add_endpoint;
		if (over->drop_endpoint)
			drv->drop_endpoint = over->drop_endpoint;
		if (over->check_bandwidth)
			drv->check_bandwidth = over->check_bandwidth;
		if (over->reset_bandwidth)
			drv->reset_bandwidth = over->reset_bandwidth;
		if (over->update_hub_device)
			drv->update_hub_device = over->update_hub_device;
		if (over->hub_control)
			drv->hub_control = over->hub_control;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
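	/* Each size reads as: <N 32-bit registers> * 32 bits / 8 bits per byte */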
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/*
	 * xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();
	xhci_dbc_init();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
	xhci_dbc_exit();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * xHCI host controller driver
4 *
5 * Copyright (C) 2008 Intel Corp.
6 *
7 * Author: Sarah Sharp
8 * Some code borrowed from the Linux EHCI driver.
9 */
10
11#include <linux/pci.h>
12#include <linux/irq.h>
13#include <linux/log2.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/slab.h>
17#include <linux/dmi.h>
18#include <linux/dma-mapping.h>
19
20#include "xhci.h"
21#include "xhci-trace.h"
22#include "xhci-mtk.h"
23#include "xhci-debugfs.h"
24#include "xhci-dbgcap.h"
25
26#define DRIVER_AUTHOR "Sarah Sharp"
27#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
28
29#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
30
31/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
32static int link_quirk;
33module_param(link_quirk, int, S_IRUGO | S_IWUSR);
34MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
35
36static unsigned int quirks;
37module_param(quirks, uint, S_IRUGO);
38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
39
40/* TODO: copied from ehci-hcd.c - can this be refactored? */
41/*
42 * xhci_handshake - spin reading hc until handshake completes or fails
43 * @ptr: address of hc register to be read
44 * @mask: bits to look at in result of read
45 * @done: value of those bits when handshake succeeds
46 * @usec: timeout in microseconds
47 *
48 * Returns negative errno, or zero on success
49 *
50 * Success happens when the "mask" bits have the specified value (hardware
51 * handshake done). There are two failure modes: "usec" have passed (major
52 * hardware flakeout), or the register reads as all-ones (hardware removed).
53 */
54int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
55{
56 u32 result;
57
58 do {
59 result = readl(ptr);
60 if (result == ~(u32)0) /* card removed */
61 return -ENODEV;
62 result &= mask;
63 if (result == done)
64 return 0;
65 udelay(1);
66 usec--;
67 } while (usec > 0);
68 return -ETIMEDOUT;
69}
70
71/*
72 * Disable interrupts and begin the xHCI halting process.
73 */
74void xhci_quiesce(struct xhci_hcd *xhci)
75{
76 u32 halted;
77 u32 cmd;
78 u32 mask;
79
80 mask = ~(XHCI_IRQS);
81 halted = readl(&xhci->op_regs->status) & STS_HALT;
82 if (!halted)
83 mask &= ~CMD_RUN;
84
85 cmd = readl(&xhci->op_regs->command);
86 cmd &= mask;
87 writel(cmd, &xhci->op_regs->command);
88}
89
90/*
91 * Force HC into halt state.
92 *
93 * Disable any IRQs and clear the run/stop bit.
94 * HC will complete any current and actively pipelined transactions, and
95 * should halt within 16 ms of the run/stop bit being cleared.
96 * Read HC Halted bit in the status register to see when the HC is finished.
97 */
98int xhci_halt(struct xhci_hcd *xhci)
99{
100 int ret;
101 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
102 xhci_quiesce(xhci);
103
104 ret = xhci_handshake(&xhci->op_regs->status,
105 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
106 if (ret) {
107 xhci_warn(xhci, "Host halt failed, %d\n", ret);
108 return ret;
109 }
110 xhci->xhc_state |= XHCI_STATE_HALTED;
111 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
112 return ret;
113}
114
115/*
116 * Set the run bit and wait for the host to be running.
117 */
118int xhci_start(struct xhci_hcd *xhci)
119{
120 u32 temp;
121 int ret;
122
123 temp = readl(&xhci->op_regs->command);
124 temp |= (CMD_RUN);
125 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
126 temp);
127 writel(temp, &xhci->op_regs->command);
128
129 /*
130 * Wait for the HCHalted Status bit to be 0 to indicate the host is
131 * running.
132 */
133 ret = xhci_handshake(&xhci->op_regs->status,
134 STS_HALT, 0, XHCI_MAX_HALT_USEC);
135 if (ret == -ETIMEDOUT)
136 xhci_err(xhci, "Host took too long to start, "
137 "waited %u microseconds.\n",
138 XHCI_MAX_HALT_USEC);
139 if (!ret)
140 /* clear state flags. Including dying, halted or removing */
141 xhci->xhc_state = 0;
142
143 return ret;
144}
145
146/*
147 * Reset a halted HC.
148 *
149 * This resets pipelines, timers, counters, state machines, etc.
150 * Transactions will be terminated immediately, and operational registers
151 * will be set to their defaults.
152 */
153int xhci_reset(struct xhci_hcd *xhci)
154{
155 u32 command;
156 u32 state;
157 int ret, i;
158
159 state = readl(&xhci->op_regs->status);
160
161 if (state == ~(u32)0) {
162 xhci_warn(xhci, "Host not accessible, reset failed.\n");
163 return -ENODEV;
164 }
165
166 if ((state & STS_HALT) == 0) {
167 xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
168 return 0;
169 }
170
171 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
172 command = readl(&xhci->op_regs->command);
173 command |= CMD_RESET;
174 writel(command, &xhci->op_regs->command);
175
176 /* Existing Intel xHCI controllers require a delay of 1 mS,
177 * after setting the CMD_RESET bit, and before accessing any
178 * HC registers. This allows the HC to complete the
179 * reset operation and be ready for HC register access.
180 * Without this delay, the subsequent HC register access,
181 * may result in a system hang very rarely.
182 */
183 if (xhci->quirks & XHCI_INTEL_HOST)
184 udelay(1000);
185
186 ret = xhci_handshake(&xhci->op_regs->command,
187 CMD_RESET, 0, 10 * 1000 * 1000);
188 if (ret)
189 return ret;
190
191 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
192 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
193
194 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
195 "Wait for controller to be ready for doorbell rings");
196 /*
197 * xHCI cannot write to any doorbells or operational registers other
198 * than status until the "Controller Not Ready" flag is cleared.
199 */
200 ret = xhci_handshake(&xhci->op_regs->status,
201 STS_CNR, 0, 10 * 1000 * 1000);
202
203 for (i = 0; i < 2; i++) {
204 xhci->bus_state[i].port_c_suspend = 0;
205 xhci->bus_state[i].suspended_ports = 0;
206 xhci->bus_state[i].resuming_ports = 0;
207 }
208
209 return ret;
210}
211
212
213#ifdef CONFIG_USB_PCI
214/*
215 * Set up MSI
216 */
217static int xhci_setup_msi(struct xhci_hcd *xhci)
218{
219 int ret;
220 /*
221 * TODO:Check with MSI Soc for sysdev
222 */
223 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
224
225 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
226 if (ret < 0) {
227 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
228 "failed to allocate MSI entry");
229 return ret;
230 }
231
232 ret = request_irq(pdev->irq, xhci_msi_irq,
233 0, "xhci_hcd", xhci_to_hcd(xhci));
234 if (ret) {
235 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
236 "disable MSI interrupt");
237 pci_free_irq_vectors(pdev);
238 }
239
240 return ret;
241}
242
243/*
244 * Set up MSI-X
245 */
246static int xhci_setup_msix(struct xhci_hcd *xhci)
247{
248 int i, ret = 0;
249 struct usb_hcd *hcd = xhci_to_hcd(xhci);
250 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
251
252 /*
253 * calculate number of msi-x vectors supported.
254 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
255 * with max number of interrupters based on the xhci HCSPARAMS1.
256 * - num_online_cpus: maximum msi-x vectors per CPUs core.
257 * Add additional 1 vector to ensure always available interrupt.
258 */
259 xhci->msix_count = min(num_online_cpus() + 1,
260 HCS_MAX_INTRS(xhci->hcs_params1));
261
262 ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
263 PCI_IRQ_MSIX);
264 if (ret < 0) {
265 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
266 "Failed to enable MSI-X");
267 return ret;
268 }
269
270 for (i = 0; i < xhci->msix_count; i++) {
271 ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
272 "xhci_hcd", xhci_to_hcd(xhci));
273 if (ret)
274 goto disable_msix;
275 }
276
277 hcd->msix_enabled = 1;
278 return ret;
279
280disable_msix:
281 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
282 while (--i >= 0)
283 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
284 pci_free_irq_vectors(pdev);
285 return ret;
286}
287
288/* Free any IRQs and disable MSI-X */
289static void xhci_cleanup_msix(struct xhci_hcd *xhci)
290{
291 struct usb_hcd *hcd = xhci_to_hcd(xhci);
292 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
293
294 if (xhci->quirks & XHCI_PLAT)
295 return;
296
297 /* return if using legacy interrupt */
298 if (hcd->irq > 0)
299 return;
300
301 if (hcd->msix_enabled) {
302 int i;
303
304 for (i = 0; i < xhci->msix_count; i++)
305 free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
306 } else {
307 free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
308 }
309
310 pci_free_irq_vectors(pdev);
311 hcd->msix_enabled = 0;
312}
313
314static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
315{
316 struct usb_hcd *hcd = xhci_to_hcd(xhci);
317
318 if (hcd->msix_enabled) {
319 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
320 int i;
321
322 for (i = 0; i < xhci->msix_count; i++)
323 synchronize_irq(pci_irq_vector(pdev, i));
324 }
325}
326
327static int xhci_try_enable_msi(struct usb_hcd *hcd)
328{
329 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
330 struct pci_dev *pdev;
331 int ret;
332
333 /* The xhci platform device has set up IRQs through usb_add_hcd. */
334 if (xhci->quirks & XHCI_PLAT)
335 return 0;
336
337 pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
338 /*
339 * Some Fresco Logic host controllers advertise MSI, but fail to
340 * generate interrupts. Don't even try to enable MSI.
341 */
342 if (xhci->quirks & XHCI_BROKEN_MSI)
343 goto legacy_irq;
344
345 /* unregister the legacy interrupt */
346 if (hcd->irq)
347 free_irq(hcd->irq, hcd);
348 hcd->irq = 0;
349
350 ret = xhci_setup_msix(xhci);
351 if (ret)
352 /* fall back to msi*/
353 ret = xhci_setup_msi(xhci);
354
355 if (!ret) {
356 hcd->msi_enabled = 1;
357 return 0;
358 }
359
360 if (!pdev->irq) {
361 xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
362 return -EINVAL;
363 }
364
365 legacy_irq:
366 if (!strlen(hcd->irq_descr))
367 snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
368 hcd->driver->description, hcd->self.busnum);
369
370 /* fall back to legacy interrupt*/
371 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
372 hcd->irq_descr, hcd);
373 if (ret) {
374 xhci_err(xhci, "request interrupt %d failed\n",
375 pdev->irq);
376 return ret;
377 }
378 hcd->irq = pdev->irq;
379 return 0;
380}
381
382#else
383
384static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
385{
386 return 0;
387}
388
389static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
390{
391}
392
393static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
394{
395}
396
397#endif
398
399static void compliance_mode_recovery(struct timer_list *t)
400{
401 struct xhci_hcd *xhci;
402 struct usb_hcd *hcd;
403 u32 temp;
404 int i;
405
406 xhci = from_timer(xhci, t, comp_mode_recovery_timer);
407
408 for (i = 0; i < xhci->num_usb3_ports; i++) {
409 temp = readl(xhci->usb3_ports[i]);
410 if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
411 /*
412 * Compliance Mode Detected. Letting USB Core
413 * handle the Warm Reset
414 */
415 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
416 "Compliance mode detected->port %d",
417 i + 1);
418 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
419 "Attempting compliance mode recovery");
420 hcd = xhci->shared_hcd;
421
422 if (hcd->state == HC_STATE_SUSPENDED)
423 usb_hcd_resume_root_hub(hcd);
424
425 usb_hcd_poll_rh_status(hcd);
426 }
427 }
428
429 if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
430 mod_timer(&xhci->comp_mode_recovery_timer,
431 jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
432}
433
434/*
435 * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
436 * that causes ports behind that hardware to enter compliance mode sometimes.
437 * The quirk creates a timer that polls every 2 seconds the link state of
438 * each host controller's port and recovers it by issuing a Warm reset
439 * if Compliance mode is detected, otherwise the port will become "dead" (no
440 * device connections or disconnections will be detected anymore). Becasue no
441 * status event is generated when entering compliance mode (per xhci spec),
442 * this quirk is needed on systems that have the failing hardware installed.
443 */
444static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
445{
446 xhci->port_status_u0 = 0;
447 timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
448 0);
449 xhci->comp_mode_recovery_timer.expires = jiffies +
450 msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
451
452 add_timer(&xhci->comp_mode_recovery_timer);
453 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
454 "Compliance mode recovery timer initialized");
455}
456
457/*
458 * This function identifies the systems that have installed the SN65LVPE502CP
459 * USB3.0 re-driver and that need the Compliance Mode Quirk.
460 * Systems:
461 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
462 */
463static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
464{
465 const char *dmi_product_name, *dmi_sys_vendor;
466
467 dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
468 dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
469 if (!dmi_product_name || !dmi_sys_vendor)
470 return false;
471
472 if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
473 return false;
474
475 if (strstr(dmi_product_name, "Z420") ||
476 strstr(dmi_product_name, "Z620") ||
477 strstr(dmi_product_name, "Z820") ||
478 strstr(dmi_product_name, "Z1 Workstation"))
479 return true;
480
481 return false;
482}
483
484static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
485{
486 return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
487}
488
489
490/*
491 * Initialize memory for HCD and xHC (one-time init).
492 *
493 * Program the PAGESIZE register, initialize the device context array, create
494 * device contexts (?), set up a command ring segment (or two?), create event
495 * ring (one for now).
496 */
497static int xhci_init(struct usb_hcd *hcd)
498{
499 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
500 int retval = 0;
501
502 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
503 spin_lock_init(&xhci->lock);
504 if (xhci->hci_version == 0x95 && link_quirk) {
505 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
506 "QUIRK: Not clearing Link TRB chain bits.");
507 xhci->quirks |= XHCI_LINK_TRB_QUIRK;
508 } else {
509 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
510 "xHCI doesn't need link TRB QUIRK");
511 }
512 retval = xhci_mem_init(xhci, GFP_KERNEL);
513 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
514
515 /* Initializing Compliance Mode Recovery Data If Needed */
516 if (xhci_compliance_mode_recovery_timer_quirk_check()) {
517 xhci->quirks |= XHCI_COMP_MODE_QUIRK;
518 compliance_mode_recovery_timer_init(xhci);
519 }
520
521 return retval;
522}
523
524/*-------------------------------------------------------------------------*/
525
526
527static int xhci_run_finished(struct xhci_hcd *xhci)
528{
529 if (xhci_start(xhci)) {
530 xhci_halt(xhci);
531 return -ENODEV;
532 }
533 xhci->shared_hcd->state = HC_STATE_RUNNING;
534 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
535
536 if (xhci->quirks & XHCI_NEC_HOST)
537 xhci_ring_cmd_db(xhci);
538
539 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
540 "Finished xhci_run for USB3 roothub");
541 return 0;
542}
543
544/*
545 * Start the HC after it was halted.
546 *
547 * This function is called by the USB core when the HC driver is added.
548 * Its opposite is xhci_stop().
549 *
550 * xhci_init() must be called once before this function can be called.
551 * Reset the HC, enable device slot contexts, program DCBAAP, and
552 * set command ring pointer and event ring pointer.
553 *
554 * Setup MSI-X vectors and enable interrupts.
555 */
556int xhci_run(struct usb_hcd *hcd)
557{
558 u32 temp;
559 u64 temp_64;
560 int ret;
561 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
562
563 /* Start the xHCI host controller running only after the USB 2.0 roothub
564 * is setup.
565 */
566
567 hcd->uses_new_polling = 1;
568 if (!usb_hcd_is_primary_hcd(hcd))
569 return xhci_run_finished(xhci);
570
571 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
572
573 ret = xhci_try_enable_msi(hcd);
574 if (ret)
575 return ret;
576
577 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
578 temp_64 &= ~ERST_PTR_MASK;
579 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
580 "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
581
582 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
583 "// Set the interrupt modulation register");
584 temp = readl(&xhci->ir_set->irq_control);
585 temp &= ~ER_IRQ_INTERVAL_MASK;
586 temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
587 writel(temp, &xhci->ir_set->irq_control);
588
589 /* Set the HCD state before we enable the irqs */
590 temp = readl(&xhci->op_regs->command);
591 temp |= (CMD_EIE);
592 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
593 "// Enable interrupts, cmd = 0x%x.", temp);
594 writel(temp, &xhci->op_regs->command);
595
596 temp = readl(&xhci->ir_set->irq_pending);
597 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
598 "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
599 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
600 writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
601
602 if (xhci->quirks & XHCI_NEC_HOST) {
603 struct xhci_command *command;
604
605 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
606 if (!command)
607 return -ENOMEM;
608
609 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
610 TRB_TYPE(TRB_NEC_GET_FW));
611 if (ret)
612 xhci_free_command(xhci, command);
613 }
614 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
615 "Finished xhci_run for USB2 roothub");
616
617 xhci_dbc_init(xhci);
618
619 xhci_debugfs_init(xhci);
620
621 return 0;
622}
623EXPORT_SYMBOL_GPL(xhci_run);
624
625/*
626 * Stop xHCI driver.
627 *
628 * This function is called by the USB core when the HC driver is removed.
629 * Its opposite is xhci_run().
630 *
631 * Disable device contexts, disable IRQs, and quiesce the HC.
632 * Reset the HC, finish any completed transactions, and cleanup memory.
633 */
634static void xhci_stop(struct usb_hcd *hcd)
635{
636 u32 temp;
637 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
638
639 mutex_lock(&xhci->mutex);
640
641 /* Only halt host and free memory after both hcds are removed */
642 if (!usb_hcd_is_primary_hcd(hcd)) {
643 /* usb core will free this hcd shortly, unset pointer */
644 xhci->shared_hcd = NULL;
645 mutex_unlock(&xhci->mutex);
646 return;
647 }
648
649 xhci_dbc_exit(xhci);
650
651 spin_lock_irq(&xhci->lock);
652 xhci->xhc_state |= XHCI_STATE_HALTED;
653 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
654 xhci_halt(xhci);
655 xhci_reset(xhci);
656 spin_unlock_irq(&xhci->lock);
657
658 xhci_cleanup_msix(xhci);
659
660 /* Deleting Compliance Mode Recovery Timer */
661 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
662 (!(xhci_all_ports_seen_u0(xhci)))) {
663 del_timer_sync(&xhci->comp_mode_recovery_timer);
664 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
665 "%s: compliance mode recovery timer deleted",
666 __func__);
667 }
668
669 if (xhci->quirks & XHCI_AMD_PLL_FIX)
670 usb_amd_dev_put();
671
672 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
673 "// Disabling event ring interrupts");
674 temp = readl(&xhci->op_regs->status);
675 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
676 temp = readl(&xhci->ir_set->irq_pending);
677 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
678
679 xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
680 xhci_mem_cleanup(xhci);
681 xhci_debugfs_exit(xhci);
682 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
683 "xhci_stop completed - status = %x",
684 readl(&xhci->op_regs->status));
685 mutex_unlock(&xhci->mutex);
686}
687
688/*
689 * Shutdown HC (not bus-specific)
690 *
691 * This is called when the machine is rebooting or halting. We assume that the
692 * machine will be powered off, and the HC's internal state will be reset.
693 * Don't bother to free memory.
694 *
695 * This will only ever be called with the main usb_hcd (the USB3 roothub).
696 */
697static void xhci_shutdown(struct usb_hcd *hcd)
698{
699 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
700
701 if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
702 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
703
704 spin_lock_irq(&xhci->lock);
705 xhci_halt(xhci);
706 /* Workaround for spurious wakeups at shutdown with HSW */
707 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
708 xhci_reset(xhci);
709 spin_unlock_irq(&xhci->lock);
710
711 xhci_cleanup_msix(xhci);
712
713 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
714 "xhci_shutdown completed - status = %x",
715 readl(&xhci->op_regs->status));
716
717 /* Yet another workaround for spurious wakeups at shutdown with HSW */
718 if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
719 pci_set_power_state(to_pci_dev(hcd->self.sysdev), PCI_D3hot);
720}
721
722#ifdef CONFIG_PM
723static void xhci_save_registers(struct xhci_hcd *xhci)
724{
725 xhci->s3.command = readl(&xhci->op_regs->command);
726 xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
727 xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
728 xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
729 xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
730 xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
731 xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
732 xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
733 xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
734}
735
736static void xhci_restore_registers(struct xhci_hcd *xhci)
737{
738 writel(xhci->s3.command, &xhci->op_regs->command);
739 writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
740 xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
741 writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
742 writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
743 xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
744 xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
745 writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
746 writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
747}
748
749static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
750{
751 u64 val_64;
752
753 /* step 2: initialize command ring buffer */
754 val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
755 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
756 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
757 xhci->cmd_ring->dequeue) &
758 (u64) ~CMD_RING_RSVD_BITS) |
759 xhci->cmd_ring->cycle_state;
760 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
761 "// Setting command ring address to 0x%llx",
762 (long unsigned long) val_64);
763 xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
764}
765
766/*
767 * The whole command ring must be cleared to zero when we suspend the host.
768 *
769 * The host doesn't save the command ring pointer in the suspend well, so we
770 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
771 * aligned, because of the reserved bits in the command ring dequeue pointer
772 * register. Therefore, we can't just set the dequeue pointer back in the
773 * middle of the ring (TRBs are 16-byte aligned).
774 */
775static void xhci_clear_command_ring(struct xhci_hcd *xhci)
776{
777 struct xhci_ring *ring;
778 struct xhci_segment *seg;
779
780 ring = xhci->cmd_ring;
781 seg = ring->deq_seg;
782 do {
783 memset(seg->trbs, 0,
784 sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
785 seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
786 cpu_to_le32(~TRB_CYCLE);
787 seg = seg->next;
788 } while (seg != ring->deq_seg);
789
790 /* Reset the software enqueue and dequeue pointers */
791 ring->deq_seg = ring->first_seg;
792 ring->dequeue = ring->first_seg->trbs;
793 ring->enq_seg = ring->deq_seg;
794 ring->enqueue = ring->dequeue;
795
796 ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
797 /*
798 * Ring is now zeroed, so the HW should look for change of ownership
799 * when the cycle bit is set to 1.
800 */
801 ring->cycle_state = 1;
802
803 /*
804 * Reset the hardware dequeue pointer.
805 * Yes, this will need to be re-written after resume, but we're paranoid
806 * and want to make sure the hardware doesn't access bogus memory
807 * because, say, the BIOS or an SMI started the host without changing
808 * the command ring pointers.
809 */
810 xhci_set_cmd_ring_deq(xhci);
811}
812
813static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
814{
815 int port_index;
816 __le32 __iomem **port_array;
817 unsigned long flags;
818 u32 t1, t2;
819
820 spin_lock_irqsave(&xhci->lock, flags);
821
822 /* disable usb3 ports Wake bits */
823 port_index = xhci->num_usb3_ports;
824 port_array = xhci->usb3_ports;
825 while (port_index--) {
826 t1 = readl(port_array[port_index]);
827 t1 = xhci_port_state_to_neutral(t1);
828 t2 = t1 & ~PORT_WAKE_BITS;
829 if (t1 != t2)
830 writel(t2, port_array[port_index]);
831 }
832
833 /* disable usb2 ports Wake bits */
834 port_index = xhci->num_usb2_ports;
835 port_array = xhci->usb2_ports;
836 while (port_index--) {
837 t1 = readl(port_array[port_index]);
838 t1 = xhci_port_state_to_neutral(t1);
839 t2 = t1 & ~PORT_WAKE_BITS;
840 if (t1 != t2)
841 writel(t2, port_array[port_index]);
842 }
843
844 spin_unlock_irqrestore(&xhci->lock, flags);
845}
846
847/*
848 * Stop HC (not bus-specific)
849 *
850 * This is called when the machine transition into S3/S4 mode.
851 *
852 */
853int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
854{
855 int rc = 0;
856 unsigned int delay = XHCI_MAX_HALT_USEC;
857 struct usb_hcd *hcd = xhci_to_hcd(xhci);
858 u32 command;
859
860 if (!hcd->state)
861 return 0;
862
863 if (hcd->state != HC_STATE_SUSPENDED ||
864 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
865 return -EINVAL;
866
867 xhci_dbc_suspend(xhci);
868
869 /* Clear root port wake on bits if wakeup not allowed. */
870 if (!do_wakeup)
871 xhci_disable_port_wake_on_bits(xhci);
872
873 /* Don't poll the roothubs on bus suspend. */
874 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
875 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
876 del_timer_sync(&hcd->rh_timer);
877 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
878 del_timer_sync(&xhci->shared_hcd->rh_timer);
879
880 if (xhci->quirks & XHCI_SUSPEND_DELAY)
881 usleep_range(1000, 1500);
882
883 spin_lock_irq(&xhci->lock);
884 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
885 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
886 /* step 1: stop endpoint */
887 /* skipped assuming that port suspend has done */
888
889 /* step 2: clear Run/Stop bit */
890 command = readl(&xhci->op_regs->command);
891 command &= ~CMD_RUN;
892 writel(command, &xhci->op_regs->command);
893
894 /* Some chips from Fresco Logic need an extraordinary delay */
895 delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
896
897 if (xhci_handshake(&xhci->op_regs->status,
898 STS_HALT, STS_HALT, delay)) {
899 xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
900 spin_unlock_irq(&xhci->lock);
901 return -ETIMEDOUT;
902 }
903 xhci_clear_command_ring(xhci);
904
905 /* step 3: save registers */
906 xhci_save_registers(xhci);
907
908 /* step 4: set CSS flag */
909 command = readl(&xhci->op_regs->command);
910 command |= CMD_CSS;
911 writel(command, &xhci->op_regs->command);
912 if (xhci_handshake(&xhci->op_regs->status,
913 STS_SAVE, 0, 10 * 1000)) {
914 xhci_warn(xhci, "WARN: xHC save state timeout\n");
915 spin_unlock_irq(&xhci->lock);
916 return -ETIMEDOUT;
917 }
918 spin_unlock_irq(&xhci->lock);
919
920 /*
921 * Deleting Compliance Mode Recovery Timer because the xHCI Host
922 * is about to be suspended.
923 */
924 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
925 (!(xhci_all_ports_seen_u0(xhci)))) {
926 del_timer_sync(&xhci->comp_mode_recovery_timer);
927 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
928 "%s: compliance mode recovery timer deleted",
929 __func__);
930 }
931
932 /* step 5: remove core well power */
933 /* synchronize irq when using MSI-X */
934 xhci_msix_sync_irqs(xhci);
935
936 return rc;
937}
938EXPORT_SYMBOL_GPL(xhci_suspend);
939
940/*
941 * start xHC (not bus-specific)
942 *
943 * This is called when the machine transition from S3/S4 mode.
944 *
945 */
946int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
947{
948 u32 command, temp = 0, status;
949 struct usb_hcd *hcd = xhci_to_hcd(xhci);
950 struct usb_hcd *secondary_hcd;
951 int retval = 0;
952 bool comp_timer_running = false;
953
954 if (!hcd->state)
955 return 0;
956
957 /* Wait a bit if either of the roothubs need to settle from the
958 * transition into bus suspend.
959 */
960 if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
961 time_before(jiffies,
962 xhci->bus_state[1].next_statechange))
963 msleep(100);
964
965 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
966 set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
967
968 spin_lock_irq(&xhci->lock);
969 if (xhci->quirks & XHCI_RESET_ON_RESUME)
970 hibernated = true;
971
972 if (!hibernated) {
973 /* step 1: restore register */
974 xhci_restore_registers(xhci);
975 /* step 2: initialize command ring buffer */
976 xhci_set_cmd_ring_deq(xhci);
977 /* step 3: restore state and start state*/
978 /* step 3: set CRS flag */
979 command = readl(&xhci->op_regs->command);
980 command |= CMD_CRS;
981 writel(command, &xhci->op_regs->command);
982 if (xhci_handshake(&xhci->op_regs->status,
983 STS_RESTORE, 0, 10 * 1000)) {
984 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
985 spin_unlock_irq(&xhci->lock);
986 return -ETIMEDOUT;
987 }
988 temp = readl(&xhci->op_regs->status);
989 }
990
991 /* If restore operation fails, re-initialize the HC during resume */
992 if ((temp & STS_SRE) || hibernated) {
993
994 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
995 !(xhci_all_ports_seen_u0(xhci))) {
996 del_timer_sync(&xhci->comp_mode_recovery_timer);
997 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
998 "Compliance Mode Recovery Timer deleted!");
999 }
1000
1001 /* Let the USB core know _both_ roothubs lost power. */
1002 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
1003 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
1004
1005 xhci_dbg(xhci, "Stop HCD\n");
1006 xhci_halt(xhci);
1007 xhci_reset(xhci);
1008 spin_unlock_irq(&xhci->lock);
1009 xhci_cleanup_msix(xhci);
1010
1011 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1012 temp = readl(&xhci->op_regs->status);
1013 writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
1014 temp = readl(&xhci->ir_set->irq_pending);
1015 writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
1016
1017 xhci_dbg(xhci, "cleaning up memory\n");
1018 xhci_mem_cleanup(xhci);
1019 xhci_debugfs_exit(xhci);
1020 xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1021 readl(&xhci->op_regs->status));
1022
1023 /* USB core calls the PCI reinit and start functions twice:
1024 * first with the primary HCD, and then with the secondary HCD.
1025 * If we don't do the same, the host will never be started.
1026 */
1027 if (!usb_hcd_is_primary_hcd(hcd))
1028 secondary_hcd = hcd;
1029 else
1030 secondary_hcd = xhci->shared_hcd;
1031
1032 xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1033 retval = xhci_init(hcd->primary_hcd);
1034 if (retval)
1035 return retval;
1036 comp_timer_running = true;
1037
1038 xhci_dbg(xhci, "Start the primary HCD\n");
1039 retval = xhci_run(hcd->primary_hcd);
1040 if (!retval) {
1041 xhci_dbg(xhci, "Start the secondary HCD\n");
1042 retval = xhci_run(secondary_hcd);
1043 }
1044 hcd->state = HC_STATE_SUSPENDED;
1045 xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1046 goto done;
1047 }
1048
1049 /* step 4: set Run/Stop bit */
1050 command = readl(&xhci->op_regs->command);
1051 command |= CMD_RUN;
1052 writel(command, &xhci->op_regs->command);
1053 xhci_handshake(&xhci->op_regs->status, STS_HALT,
1054 0, 250 * 1000);
1055
1056 /* step 5: walk topology and initialize portsc,
1057 * portpmsc and portli
1058 */
1059 /* this is done in bus_resume */
1060
1061 /* step 6: restart each of the previously
1062 * Running endpoints by ringing their doorbells
1063 */
1064
1065 spin_unlock_irq(&xhci->lock);
1066
1067 xhci_dbc_resume(xhci);
1068
1069 done:
1070 if (retval == 0) {
1071 /* Resume root hubs only when have pending events. */
1072 status = readl(&xhci->op_regs->status);
1073 if (status & STS_EINT) {
1074 usb_hcd_resume_root_hub(xhci->shared_hcd);
1075 usb_hcd_resume_root_hub(hcd);
1076 }
1077 }
1078
1079 /*
1080 * If system is subject to the Quirk, Compliance Mode Timer needs to
1081 * be re-initialized Always after a system resume. Ports are subject
1082 * to suffer the Compliance Mode issue again. It doesn't matter if
1083 * ports have entered previously to U0 before system's suspension.
1084 */
1085 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1086 compliance_mode_recovery_timer_init(xhci);
1087
1088 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1089 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
1090
1091 /* Re-enable port polling. */
1092 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1093 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1094 usb_hcd_poll_rh_status(xhci->shared_hcd);
1095 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1096 usb_hcd_poll_rh_status(hcd);
1097
1098 return retval;
1099}
1100EXPORT_SYMBOL_GPL(xhci_resume);
1101#endif /* CONFIG_PM */
1102
1103/*-------------------------------------------------------------------------*/
1104
1105/**
1106 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
1107 * HCDs. Find the index for an endpoint given its descriptor. Use the return
1108 * value to right shift 1 for the bitmask.
1109 *
1110 * Index = (epnum * 2) + direction - 1,
1111 * where direction = 0 for OUT, 1 for IN.
1112 * For control endpoints, the IN index is used (OUT index is unused), so
1113 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
1114 */
1115unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1116{
1117 unsigned int index;
1118 if (usb_endpoint_xfer_control(desc))
1119 index = (unsigned int) (usb_endpoint_num(desc)*2);
1120 else
1121 index = (unsigned int) (usb_endpoint_num(desc)*2) +
1122 (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1123 return index;
1124}
1125
1126/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
1127 * address from the XHCI endpoint index.
1128 */
1129unsigned int xhci_get_endpoint_address(unsigned int ep_index)
1130{
1131 unsigned int number = DIV_ROUND_UP(ep_index, 2);
1132 unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
1133 return direction | number;
1134}
1135
1136/* Find the flag for this endpoint (for use in the control context). Use the
1137 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1138 * bit 1, etc.
1139 */
1140static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1141{
1142 return 1 << (xhci_get_endpoint_index(desc) + 1);
1143}
1144
1145/* Find the flag for this endpoint (for use in the control context). Use the
1146 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
1147 * bit 1, etc.
1148 */
1149static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1150{
1151 return 1 << (ep_index + 1);
1152}
1153
1154/* Compute the last valid endpoint context index. Basically, this is the
1155 * endpoint index plus one. For slot contexts with more than valid endpoint,
1156 * we find the most significant bit set in the added contexts flags.
1157 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
1158 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
1159 */
1160unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1161{
1162 return fls(added_ctxs) - 1;
1163}
1164
1165/* Returns 1 if the arguments are OK;
1166 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
1167 */
1168static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1169 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1170 const char *func) {
1171 struct xhci_hcd *xhci;
1172 struct xhci_virt_device *virt_dev;
1173
1174 if (!hcd || (check_ep && !ep) || !udev) {
1175 pr_debug("xHCI %s called with invalid args\n", func);
1176 return -EINVAL;
1177 }
1178 if (!udev->parent) {
1179 pr_debug("xHCI %s called for root hub\n", func);
1180 return 0;
1181 }
1182
1183 xhci = hcd_to_xhci(hcd);
1184 if (check_virt_dev) {
1185 if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1186 xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
1187 func);
1188 return -EINVAL;
1189 }
1190
1191 virt_dev = xhci->devs[udev->slot_id];
1192 if (virt_dev->udev != udev) {
1193 xhci_dbg(xhci, "xHCI %s called with udev and "
1194 "virt_dev does not match\n", func);
1195 return -EINVAL;
1196 }
1197 }
1198
1199 if (xhci->xhc_state & XHCI_STATE_HALTED)
1200 return -ENODEV;
1201
1202 return 1;
1203}
1204
1205static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1206 struct usb_device *udev, struct xhci_command *command,
1207 bool ctx_change, bool must_succeed);
1208
1209/*
1210 * Full speed devices may have a max packet size greater than 8 bytes, but the
1211 * USB core doesn't know that until it reads the first 8 bytes of the
1212 * descriptor. If the usb_device's max packet size changes after that point,
1213 * we need to issue an evaluate context command and wait on it.
1214 */
1215static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1216 unsigned int ep_index, struct urb *urb)
1217{
1218 struct xhci_container_ctx *out_ctx;
1219 struct xhci_input_control_ctx *ctrl_ctx;
1220 struct xhci_ep_ctx *ep_ctx;
1221 struct xhci_command *command;
1222 int max_packet_size;
1223 int hw_max_packet_size;
1224 int ret = 0;
1225
1226 out_ctx = xhci->devs[slot_id]->out_ctx;
1227 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1228 hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1229 max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1230 if (hw_max_packet_size != max_packet_size) {
1231 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1232 "Max Packet Size for ep 0 changed.");
1233 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1234 "Max packet size in usb_device = %d",
1235 max_packet_size);
1236 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1237 "Max packet size in xHCI HW = %d",
1238 hw_max_packet_size);
1239 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1240 "Issuing evaluate context command.");
1241
1242 /* Set up the input context flags for the command */
1243 /* FIXME: This won't work if a non-default control endpoint
1244 * changes max packet sizes.
1245 */
1246
1247 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
1248 if (!command)
1249 return -ENOMEM;
1250
1251 command->in_ctx = xhci->devs[slot_id]->in_ctx;
1252 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
1253 if (!ctrl_ctx) {
1254 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1255 __func__);
1256 ret = -ENOMEM;
1257 goto command_cleanup;
1258 }
1259 /* Set up the modified control endpoint 0 */
1260 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1261 xhci->devs[slot_id]->out_ctx, ep_index);
1262
1263 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1264 ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1265 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1266
1267 ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1268 ctrl_ctx->drop_flags = 0;
1269
1270 ret = xhci_configure_endpoint(xhci, urb->dev, command,
1271 true, false);
1272
1273 /* Clean up the input context for later use by bandwidth
1274 * functions.
1275 */
1276 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1277command_cleanup:
1278 kfree(command->completion);
1279 kfree(command);
1280 }
1281 return ret;
1282}
1283
1284/*
1285 * non-error returns are a promise to giveback() the urb later
1286 * we drop ownership so next owner (or urb unlink) can get it
1287 */
1288static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1289{
1290 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1291 unsigned long flags;
1292 int ret = 0;
1293 unsigned int slot_id, ep_index;
1294 unsigned int *ep_state;
1295 struct urb_priv *urb_priv;
1296 int num_tds;
1297
1298 if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1299 true, true, __func__) <= 0)
1300 return -EINVAL;
1301
1302 slot_id = urb->dev->slot_id;
1303 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1304 ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;
1305
1306 if (!HCD_HW_ACCESSIBLE(hcd)) {
1307 if (!in_interrupt())
1308 xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1309 return -ESHUTDOWN;
1310 }
1311
1312 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1313 num_tds = urb->number_of_packets;
1314 else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
1315 urb->transfer_buffer_length > 0 &&
1316 urb->transfer_flags & URB_ZERO_PACKET &&
1317 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
1318 num_tds = 2;
1319 else
1320 num_tds = 1;
1321
1322 urb_priv = kzalloc(sizeof(struct urb_priv) +
1323 num_tds * sizeof(struct xhci_td), mem_flags);
1324 if (!urb_priv)
1325 return -ENOMEM;
1326
1327 urb_priv->num_tds = num_tds;
1328 urb_priv->num_tds_done = 0;
1329 urb->hcpriv = urb_priv;
1330
1331 trace_xhci_urb_enqueue(urb);
1332
1333 if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1334 /* Check to see if the max packet size for the default control
1335 * endpoint changed during FS device enumeration
1336 */
1337 if (urb->dev->speed == USB_SPEED_FULL) {
1338 ret = xhci_check_maxpacket(xhci, slot_id,
1339 ep_index, urb);
1340 if (ret < 0) {
1341 xhci_urb_free_priv(urb_priv);
1342 urb->hcpriv = NULL;
1343 return ret;
1344 }
1345 }
1346 }
1347
1348 spin_lock_irqsave(&xhci->lock, flags);
1349
1350 if (xhci->xhc_state & XHCI_STATE_DYING) {
1351 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
1352 urb->ep->desc.bEndpointAddress, urb);
1353 ret = -ESHUTDOWN;
1354 goto free_priv;
1355 }
1356 if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
1357 xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
1358 *ep_state);
1359 ret = -EINVAL;
1360 goto free_priv;
1361 }
1362 if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
1363 xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
1364 ret = -EINVAL;
1365 goto free_priv;
1366 }
1367
1368 switch (usb_endpoint_type(&urb->ep->desc)) {
1369
1370 case USB_ENDPOINT_XFER_CONTROL:
1371 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1372 slot_id, ep_index);
1373 break;
1374 case USB_ENDPOINT_XFER_BULK:
1375 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1376 slot_id, ep_index);
1377 break;
1378 case USB_ENDPOINT_XFER_INT:
1379 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1380 slot_id, ep_index);
1381 break;
1382 case USB_ENDPOINT_XFER_ISOC:
1383 ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1384 slot_id, ep_index);
1385 }
1386
1387 if (ret) {
1388free_priv:
1389 xhci_urb_free_priv(urb_priv);
1390 urb->hcpriv = NULL;
1391 }
1392 spin_unlock_irqrestore(&xhci->lock, flags);
1393 return ret;
1394}
1395
1396/*
1397 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
1398 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
1399 * should pick up where it left off in the TD, unless a Set Transfer Ring
1400 * Dequeue Pointer is issued.
1401 *
1402 * The TRBs that make up the buffers for the canceled URB will be "removed" from
1403 * the ring. Since the ring is a contiguous structure, they can't be physically
1404 * removed. Instead, there are two options:
1405 *
1406 * 1) If the HC is in the middle of processing the URB to be canceled, we
1407 * simply move the ring's dequeue pointer past those TRBs using the Set
1408 * Transfer Ring Dequeue Pointer command. This will be the common case,
1409 * when drivers timeout on the last submitted URB and attempt to cancel.
1410 *
1411 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
1412 * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
1413 * HC will need to invalidate the any TRBs it has cached after the stop
1414 * endpoint command, as noted in the xHCI 0.95 errata.
1415 *
1416 * 3) The TD may have completed by the time the Stop Endpoint Command
1417 * completes, so software needs to handle that case too.
1418 *
1419 * This function should protect against the TD enqueueing code ringing the
1420 * doorbell while this code is waiting for a Stop Endpoint command to complete.
1421 * It also needs to account for multiple cancellations on happening at the same
1422 * time for the same endpoint.
1423 *
1424 * Note that this function can be called in any context, or so says
1425 * usb_hcd_unlink_urb()
1426 */
1427static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1428{
1429 unsigned long flags;
1430 int ret, i;
1431 u32 temp;
1432 struct xhci_hcd *xhci;
1433 struct urb_priv *urb_priv;
1434 struct xhci_td *td;
1435 unsigned int ep_index;
1436 struct xhci_ring *ep_ring;
1437 struct xhci_virt_ep *ep;
1438 struct xhci_command *command;
1439 struct xhci_virt_device *vdev;
1440
1441 xhci = hcd_to_xhci(hcd);
1442 spin_lock_irqsave(&xhci->lock, flags);
1443
1444 trace_xhci_urb_dequeue(urb);
1445
1446 /* Make sure the URB hasn't completed or been unlinked already */
1447 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1448 if (ret)
1449 goto done;
1450
1451 /* give back URB now if we can't queue it for cancel */
1452 vdev = xhci->devs[urb->dev->slot_id];
1453 urb_priv = urb->hcpriv;
1454 if (!vdev || !urb_priv)
1455 goto err_giveback;
1456
1457 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1458 ep = &vdev->eps[ep_index];
1459 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1460 if (!ep || !ep_ring)
1461 goto err_giveback;
1462
1463 /* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
1464 temp = readl(&xhci->op_regs->status);
1465 if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
1466 xhci_hc_died(xhci);
1467 goto done;
1468 }
1469
1470 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1471 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1472 "HC halted, freeing TD manually.");
1473 for (i = urb_priv->num_tds_done;
1474 i < urb_priv->num_tds;
1475 i++) {
1476 td = &urb_priv->td[i];
1477 if (!list_empty(&td->td_list))
1478 list_del_init(&td->td_list);
1479 if (!list_empty(&td->cancelled_td_list))
1480 list_del_init(&td->cancelled_td_list);
1481 }
1482 goto err_giveback;
1483 }
1484
1485 i = urb_priv->num_tds_done;
1486 if (i < urb_priv->num_tds)
1487 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1488 "Cancel URB %p, dev %s, ep 0x%x, "
1489 "starting at offset 0x%llx",
1490 urb, urb->dev->devpath,
1491 urb->ep->desc.bEndpointAddress,
1492 (unsigned long long) xhci_trb_virt_to_dma(
1493 urb_priv->td[i].start_seg,
1494 urb_priv->td[i].first_trb));
1495
1496 for (; i < urb_priv->num_tds; i++) {
1497 td = &urb_priv->td[i];
1498 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1499 }
1500
1501 /* Queue a stop endpoint command, but only if this is
1502 * the first cancellation to be handled.
1503 */
1504 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1505 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1506 if (!command) {
1507 ret = -ENOMEM;
1508 goto done;
1509 }
1510 ep->ep_state |= EP_STOP_CMD_PENDING;
1511 ep->stop_cmd_timer.expires = jiffies +
1512 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1513 add_timer(&ep->stop_cmd_timer);
1514 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1515 ep_index, 0);
1516 xhci_ring_cmd_db(xhci);
1517 }
1518done:
1519 spin_unlock_irqrestore(&xhci->lock, flags);
1520 return ret;
1521
1522err_giveback:
1523 if (urb_priv)
1524 xhci_urb_free_priv(urb_priv);
1525 usb_hcd_unlink_urb_from_ep(hcd, urb);
1526 spin_unlock_irqrestore(&xhci->lock, flags);
1527 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1528 return ret;
1529}
1530
1531/* Drop an endpoint from a new bandwidth configuration for this device.
1532 * Only one call to this function is allowed per endpoint before
1533 * check_bandwidth() or reset_bandwidth() must be called.
1534 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1535 * add the endpoint to the schedule with possibly new parameters denoted by a
1536 * different endpoint descriptor in usb_host_endpoint.
1537 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1538 * not allowed.
1539 *
1540 * The USB core will not allow URBs to be queued to an endpoint that is being
1541 * disabled, so there's no need for mutual exclusion to protect
1542 * the xhci->devs[slot_id] structure.
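 *
 * For reference, the typical sequence driven by the USB core's
 * usb_hcd_alloc_bandwidth() is (illustrative, not exhaustive):
 *
 *	xhci_drop_endpoint()	for each endpoint going away
 *	xhci_add_endpoint()	for each endpoint in the new setting
 *	xhci_check_bandwidth()	to commit the changes, or
 *	xhci_reset_bandwidth()	to abandon them on failure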
1543 */
1544static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1545 struct usb_host_endpoint *ep)
1546{
1547 struct xhci_hcd *xhci;
1548 struct xhci_container_ctx *in_ctx, *out_ctx;
1549 struct xhci_input_control_ctx *ctrl_ctx;
1550 unsigned int ep_index;
1551 struct xhci_ep_ctx *ep_ctx;
1552 u32 drop_flag;
1553 u32 new_add_flags, new_drop_flags;
1554 int ret;
1555
1556 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1557 if (ret <= 0)
1558 return ret;
1559 xhci = hcd_to_xhci(hcd);
1560 if (xhci->xhc_state & XHCI_STATE_DYING)
1561 return -ENODEV;
1562
1563 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1564 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1565 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1566 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1567 __func__, drop_flag);
1568 return 0;
1569 }
1570
1571 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1572 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1573 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1574 if (!ctrl_ctx) {
1575 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1576 __func__);
1577 return 0;
1578 }
1579
1580 ep_index = xhci_get_endpoint_index(&ep->desc);
1581 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1582 /* If the HC already knows the endpoint is disabled,
1583 * or the HCD has noted it is disabled, ignore this request
1584 */
1585 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1586 le32_to_cpu(ctrl_ctx->drop_flags) &
1587 xhci_get_endpoint_flag(&ep->desc)) {
1588 /* Do not warn when called after a usb_device_reset */
1589 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1590 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1591 __func__, ep);
1592 return 0;
1593 }
1594
1595 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1596 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1597
1598 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1599 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1600
1601 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1602
1603 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1604
1605 if (xhci->quirks & XHCI_MTK_HOST)
1606 xhci_mtk_drop_ep_quirk(hcd, udev, ep);
1607
1608 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1609 (unsigned int) ep->desc.bEndpointAddress,
1610 udev->slot_id,
1611 (unsigned int) new_drop_flags,
1612 (unsigned int) new_add_flags);
1613 return 0;
1614}
1615
1616/* Add an endpoint to a new possible bandwidth configuration for this device.
1617 * Only one call to this function is allowed per endpoint before
1618 * check_bandwidth() or reset_bandwidth() must be called.
1619 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1620 * add the endpoint to the schedule with possibly new parameters denoted by a
1621 * different endpoint descriptor in usb_host_endpoint.
1622 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1623 * not allowed.
1624 *
1625 * The USB core will not allow URBs to be queued to an endpoint until the
1626 * configuration or alt setting is installed in the device, so there's no need
1627 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1628 */
1629static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1630 struct usb_host_endpoint *ep)
1631{
1632 struct xhci_hcd *xhci;
1633 struct xhci_container_ctx *in_ctx;
1634 unsigned int ep_index;
1635 struct xhci_input_control_ctx *ctrl_ctx;
1636 u32 added_ctxs;
1637 u32 new_add_flags, new_drop_flags;
1638 struct xhci_virt_device *virt_dev;
1639 int ret = 0;
1640
1641 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1642 if (ret <= 0) {
1643 /* So we won't queue a reset ep command for a root hub */
1644 ep->hcpriv = NULL;
1645 return ret;
1646 }
1647 xhci = hcd_to_xhci(hcd);
1648 if (xhci->xhc_state & XHCI_STATE_DYING)
1649 return -ENODEV;
1650
1651 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1652 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1653 /* FIXME when we have to issue an evaluate endpoint command to
1654 * deal with ep0 max packet size changing once we get the
1655 * descriptors
1656 */
1657 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1658 __func__, added_ctxs);
1659 return 0;
1660 }
1661
1662 virt_dev = xhci->devs[udev->slot_id];
1663 in_ctx = virt_dev->in_ctx;
1664 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1665 if (!ctrl_ctx) {
1666 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1667 __func__);
1668 return 0;
1669 }
1670
1671 ep_index = xhci_get_endpoint_index(&ep->desc);
1672 /* If this endpoint is already in use, and the upper layers are trying
1673 * to add it again without dropping it, reject the addition.
1674 */
1675 if (virt_dev->eps[ep_index].ring &&
1676 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
1677 xhci_warn(xhci, "Trying to add endpoint 0x%x "
1678 "without dropping it.\n",
1679 (unsigned int) ep->desc.bEndpointAddress);
1680 return -EINVAL;
1681 }
1682
1683 /* If the HCD has already noted the endpoint is enabled,
1684 * ignore this request.
1685 */
1686 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
1687 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1688 __func__, ep);
1689 return 0;
1690 }
1691
1692 /*
1693 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so the documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
1696 */
1697 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1698 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1699 __func__, ep->desc.bEndpointAddress);
1700 return -ENOMEM;
1701 }
1702
1703 if (xhci->quirks & XHCI_MTK_HOST) {
1704 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1705 if (ret < 0) {
1706 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1707 virt_dev->eps[ep_index].new_ring = NULL;
1708 return ret;
1709 }
1710 }
1711
1712 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1713 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1714
1715 /* If xhci_endpoint_disable() was called for this endpoint, but the
1716 * xHC hasn't been notified yet through the check_bandwidth() call,
1717 * this re-adds a new state for the endpoint from the new endpoint
1718 * descriptors. We must drop and re-add this endpoint, so we leave the
1719 * drop flags alone.
1720 */
1721 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1722
1723 /* Store the usb_device pointer for later use */
1724 ep->hcpriv = udev;
1725
1726 xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
1727
1728 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1729 (unsigned int) ep->desc.bEndpointAddress,
1730 udev->slot_id,
1731 (unsigned int) new_drop_flags,
1732 (unsigned int) new_add_flags);
1733 return 0;
1734}
1735
1736static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1737{
1738 struct xhci_input_control_ctx *ctrl_ctx;
1739 struct xhci_ep_ctx *ep_ctx;
1740 struct xhci_slot_ctx *slot_ctx;
1741 int i;
1742
1743 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1744 if (!ctrl_ctx) {
1745 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1746 __func__);
1747 return;
1748 }
1749
1750 /* When a device's add flag and drop flag are zero, any subsequent
1751 * configure endpoint command will leave that endpoint's state
1752 * untouched. Make sure we don't leave any old state in the input
1753 * endpoint contexts.
1754 */
1755 ctrl_ctx->drop_flags = 0;
1756 ctrl_ctx->add_flags = 0;
1757 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1758 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1759 /* Endpoint 0 is always valid */
1760 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1761 for (i = 1; i < 31; i++) {
1762 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1763 ep_ctx->ep_info = 0;
1764 ep_ctx->ep_info2 = 0;
1765 ep_ctx->deq = 0;
1766 ep_ctx->tx_info = 0;
1767 }
1768}
1769
1770static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1771 struct usb_device *udev, u32 *cmd_status)
1772{
1773 int ret;
1774
1775 switch (*cmd_status) {
1776 case COMP_COMMAND_ABORTED:
1777 case COMP_COMMAND_RING_STOPPED:
1778 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
1779 ret = -ETIME;
1780 break;
1781 case COMP_RESOURCE_ERROR:
1782 dev_warn(&udev->dev,
1783 "Not enough host controller resources for new device state.\n");
1784 ret = -ENOMEM;
1785 /* FIXME: can we allocate more resources for the HC? */
1786 break;
1787 case COMP_BANDWIDTH_ERROR:
1788 case COMP_SECONDARY_BANDWIDTH_ERROR:
1789 dev_warn(&udev->dev,
1790 "Not enough bandwidth for new device state.\n");
1791 ret = -ENOSPC;
1792 /* FIXME: can we go back to the old state? */
1793 break;
1794 case COMP_TRB_ERROR:
1795 /* the HCD set up something wrong */
1796 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1797 "add flag = 1, "
1798 "and endpoint is not disabled.\n");
1799 ret = -EINVAL;
1800 break;
1801 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1802 dev_warn(&udev->dev,
1803 "ERROR: Incompatible device for endpoint configure command.\n");
1804 ret = -ENODEV;
1805 break;
1806 case COMP_SUCCESS:
1807 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1808 "Successful Endpoint Configure command");
1809 ret = 0;
1810 break;
1811 default:
1812 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1813 *cmd_status);
1814 ret = -EINVAL;
1815 break;
1816 }
1817 return ret;
1818}
1819
1820static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1821 struct usb_device *udev, u32 *cmd_status)
1822{
1823 int ret;
1824
1825 switch (*cmd_status) {
1826 case COMP_COMMAND_ABORTED:
1827 case COMP_COMMAND_RING_STOPPED:
1828 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
1829 ret = -ETIME;
1830 break;
1831 case COMP_PARAMETER_ERROR:
1832 dev_warn(&udev->dev,
1833 "WARN: xHCI driver setup invalid evaluate context command.\n");
1834 ret = -EINVAL;
1835 break;
1836 case COMP_SLOT_NOT_ENABLED_ERROR:
1837 dev_warn(&udev->dev,
1838 "WARN: slot not enabled for evaluate context command.\n");
1839 ret = -EINVAL;
1840 break;
1841 case COMP_CONTEXT_STATE_ERROR:
1842 dev_warn(&udev->dev,
1843 "WARN: invalid context state for evaluate context command.\n");
1844 ret = -EINVAL;
1845 break;
1846 case COMP_INCOMPATIBLE_DEVICE_ERROR:
1847 dev_warn(&udev->dev,
1848 "ERROR: Incompatible device for evaluate context command.\n");
1849 ret = -ENODEV;
1850 break;
1851 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
1852 /* Max Exit Latency too large error */
1853 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1854 ret = -EINVAL;
1855 break;
1856 case COMP_SUCCESS:
1857 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
1858 "Successful evaluate context command");
1859 ret = 0;
1860 break;
1861 default:
1862 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
1863 *cmd_status);
1864 ret = -EINVAL;
1865 break;
1866 }
1867 return ret;
1868}
1869
1870static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1871 struct xhci_input_control_ctx *ctrl_ctx)
1872{
1873 u32 valid_add_flags;
1874 u32 valid_drop_flags;
1875
1876 /* Ignore the slot flag (bit 0), and the default control endpoint flag
1877 * (bit 1). The default control endpoint is added during the Address
1878 * Device command and is never removed until the slot is disabled.
1879 */
1880 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1881 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1882
1883 /* Use hweight32 to count the number of ones in the add flags, or
1884 * number of endpoints added. Don't count endpoints that are changed
1885 * (both added and dropped).
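	 *
	 * Worked example (illustrative): add_flags = 0x18 (EP contexts 3 and
	 * 4 added) and drop_flags = 0x08 (EP context 3 also dropped, i.e.
	 * changed). After shifting out the slot and EP0 bits this is
	 * hweight32(0x6) - hweight32(0x6 & 0x2) = 2 - 1 = one truly new
	 * endpoint.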
1886 */
1887 return hweight32(valid_add_flags) -
1888 hweight32(valid_add_flags & valid_drop_flags);
1889}
1890
1891static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1892 struct xhci_input_control_ctx *ctrl_ctx)
1893{
1894 u32 valid_add_flags;
1895 u32 valid_drop_flags;
1896
1897 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1898 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1899
1900 return hweight32(valid_drop_flags) -
1901 hweight32(valid_add_flags & valid_drop_flags);
1902}
1903
1904/*
1905 * We need to reserve the new number of endpoints before the configure endpoint
1906 * command completes. We can't subtract the dropped endpoints from the number
1907 * of active endpoints until the command completes because we can oversubscribe
1908 * the host in this case:
1909 *
1910 * - the first configure endpoint command drops more endpoints than it adds
1911 * - a second configure endpoint command that adds more endpoints is queued
1912 * - the first configure endpoint command fails, so the config is unchanged
 * - the second command may succeed, even though there aren't enough resources
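 *
 * Concrete numbers (illustrative): limit_active_eps = 64 and
 * num_active_eps = 60. Command A drops 8 endpoints and adds 2; command B
 * adds 6 more. Subtracting A's drops immediately would let B reserve
 * 54 + 6 <= 64, but if A then fails the device still has its original 60
 * endpoints and B's 6 oversubscribe the host (66 > 64).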
1914 *
1915 * Must be called with xhci->lock held.
1916 */
1917static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1918 struct xhci_input_control_ctx *ctrl_ctx)
1919{
1920 u32 added_eps;
1921
1922 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1923 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1924 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1925 "Not enough ep ctxs: "
1926 "%u active, need to add %u, limit is %u.",
1927 xhci->num_active_eps, added_eps,
1928 xhci->limit_active_eps);
1929 return -ENOMEM;
1930 }
1931 xhci->num_active_eps += added_eps;
1932 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1933 "Adding %u ep ctxs, %u now active.", added_eps,
1934 xhci->num_active_eps);
1935 return 0;
1936}
1937
1938/*
 * The configure endpoint command was failed by the xHC for some other
 * reason, so we need to revert the resources that the failed configuration
 * would have used.
1941 *
1942 * Must be called with xhci->lock held.
1943 */
1944static void xhci_free_host_resources(struct xhci_hcd *xhci,
1945 struct xhci_input_control_ctx *ctrl_ctx)
1946{
1947 u32 num_failed_eps;
1948
1949 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
1950 xhci->num_active_eps -= num_failed_eps;
1951 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1952 "Removing %u failed ep ctxs, %u now active.",
1953 num_failed_eps,
1954 xhci->num_active_eps);
1955}
1956
1957/*
1958 * Now that the command has completed, clean up the active endpoint count by
1959 * subtracting out the endpoints that were dropped (but not changed).
1960 *
1961 * Must be called with xhci->lock held.
1962 */
1963static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1964 struct xhci_input_control_ctx *ctrl_ctx)
1965{
1966 u32 num_dropped_eps;
1967
1968 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
1969 xhci->num_active_eps -= num_dropped_eps;
1970 if (num_dropped_eps)
1971 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1972 "Removing %u dropped ep ctxs, %u now active.",
1973 num_dropped_eps,
1974 xhci->num_active_eps);
1975}
1976
1977static unsigned int xhci_get_block_size(struct usb_device *udev)
1978{
1979 switch (udev->speed) {
1980 case USB_SPEED_LOW:
1981 case USB_SPEED_FULL:
1982 return FS_BLOCK;
1983 case USB_SPEED_HIGH:
1984 return HS_BLOCK;
1985 case USB_SPEED_SUPER:
1986 case USB_SPEED_SUPER_PLUS:
1987 return SS_BLOCK;
1988 case USB_SPEED_UNKNOWN:
1989 case USB_SPEED_WIRELESS:
1990 default:
1991 /* Should never happen */
1992 return 1;
1993 }
1994}
1995
1996static unsigned int
1997xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1998{
1999 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2000 return LS_OVERHEAD;
2001 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2002 return FS_OVERHEAD;
2003 return HS_OVERHEAD;
2004}
2005
2006/* If we are changing a LS/FS device under a HS hub,
2007 * make sure (if we are activating a new TT) that the HS bus has enough
2008 * bandwidth for this new TT.
2009 */
2010static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2011 struct xhci_virt_device *virt_dev,
2012 int old_active_eps)
2013{
2014 struct xhci_interval_bw_table *bw_table;
2015 struct xhci_tt_bw_info *tt_info;
2016
2017 /* Find the bandwidth table for the root port this TT is attached to. */
2018 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2019 tt_info = virt_dev->tt_info;
2020 /* If this TT already had active endpoints, the bandwidth for this TT
2021 * has already been added. Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
2023 */
2024 if (old_active_eps)
2025 return 0;
2026 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2027 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2028 return -ENOMEM;
2029 return 0;
2030 }
2031 /* Not sure why we would have no new active endpoints...
2032 *
2033 * Maybe because of an Evaluate Context change for a hub update or a
2034 * control endpoint 0 max packet size change?
2035 * FIXME: skip the bandwidth calculation in that case.
2036 */
2037 return 0;
2038}
2039
2040static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2041 struct xhci_virt_device *virt_dev)
2042{
2043 unsigned int bw_reserved;
2044
2045 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2046 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2047 return -ENOMEM;
2048
2049 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2050 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2051 return -ENOMEM;
2052
2053 return 0;
2054}
2055
2056/*
2057 * This algorithm is a very conservative estimate of the worst-case scheduling
2058 * scenario for any one interval. The hardware dynamically schedules the
2059 * packets, so we can't tell which microframe could be the limiting factor in
2060 * the bandwidth scheduling. This only takes into account periodic endpoints.
2061 *
 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2063 * case scenario. Instead, we come up with an estimate that is no less than
2064 * the worst case bandwidth used for any one microframe, but may be an
2065 * over-estimate.
2066 *
2067 * We walk the requirements for each endpoint by interval, starting with the
2068 * smallest interval, and place packets in the schedule where there is only one
2069 * possible way to schedule packets for that interval. In order to simplify
2070 * this algorithm, we record the largest max packet size for each interval, and
2071 * assume all packets will be that size.
2072 *
2073 * For interval 0, we obviously must schedule all packets for each interval.
2074 * The bandwidth for interval 0 is just the amount of data to be transmitted
2075 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2076 * the number of packets).
2077 *
2078 * For interval 1, we have two possible microframes to schedule those packets
2079 * in. For this algorithm, if we can schedule the same number of packets for
2080 * each possible scheduling opportunity (each microframe), we will do so. The
2081 * remaining number of packets will be saved to be transmitted in the gaps in
2082 * the next interval's scheduling sequence.
2083 *
2084 * As we move those remaining packets to be scheduled with interval 2 packets,
2085 * we have to double the number of remaining packets to transmit. This is
2086 * because the intervals are actually powers of 2, and we would be transmitting
2087 * the previous interval's packets twice in this interval. We also have to be
2088 * sure that when we look at the largest max packet size for this interval, we
2089 * also look at the largest max packet size for the remaining packets and take
2090 * the greater of the two.
2091 *
2092 * The algorithm continues to evenly distribute packets in each scheduling
2093 * opportunity, and push the remaining packets out, until we get to the last
2094 * interval. Then those packets and their associated overhead are just added
2095 * to the bandwidth used.
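 *
 * Worked example (illustrative): suppose interval 1 contributes 5 packets
 * and nothing was carried over. With 1 << (1 + 1) = 4 scheduling
 * opportunities, 5 >> 2 = 1 packet is placed in each opportunity and
 * 5 % 4 = 1 packet is carried forward. At interval 2 (assuming it adds no
 * packets of its own) the leftover doubles to 2, which is too few to fill
 * the 8 opportunities there, so it is carried outward again until a later
 * interval absorbs it or it is over-scheduled after interval 15.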
2096 */
2097static int xhci_check_bw_table(struct xhci_hcd *xhci,
2098 struct xhci_virt_device *virt_dev,
2099 int old_active_eps)
2100{
2101 unsigned int bw_reserved;
2102 unsigned int max_bandwidth;
2103 unsigned int bw_used;
2104 unsigned int block_size;
2105 struct xhci_interval_bw_table *bw_table;
2106 unsigned int packet_size = 0;
2107 unsigned int overhead = 0;
2108 unsigned int packets_transmitted = 0;
2109 unsigned int packets_remaining = 0;
2110 unsigned int i;
2111
2112 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2113 return xhci_check_ss_bw(xhci, virt_dev);
2114
2115 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2116 max_bandwidth = HS_BW_LIMIT;
2117 /* Convert percent of bus BW reserved to blocks reserved */
2118 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2119 } else {
2120 max_bandwidth = FS_BW_LIMIT;
2121 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2122 }
2123
2124 bw_table = virt_dev->bw_table;
2125 /* We need to translate the max packet size and max ESIT payloads into
2126 * the units the hardware uses.
2127 */
2128 block_size = xhci_get_block_size(virt_dev->udev);
2129
2130 /* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
2132 */
2133 if (virt_dev->tt_info) {
2134 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2135 "Recalculating BW for rootport %u",
2136 virt_dev->real_port);
2137 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2138 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2139 "newly activated TT.\n");
2140 return -ENOMEM;
2141 }
2142 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2143 "Recalculating BW for TT slot %u port %u",
2144 virt_dev->tt_info->slot_id,
2145 virt_dev->tt_info->ttport);
2146 } else {
2147 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2148 "Recalculating BW for rootport %u",
2149 virt_dev->real_port);
2150 }
2151
2152 /* Add in how much bandwidth will be used for interval zero, or the
2153 * rounded max ESIT payload + number of packets * largest overhead.
2154 */
2155 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2156 bw_table->interval_bw[0].num_packets *
2157 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2158
2159 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2160 unsigned int bw_added;
2161 unsigned int largest_mps;
2162 unsigned int interval_overhead;
2163
2164 /*
2165 * How many packets could we transmit in this interval?
2166 * If packets didn't fit in the previous interval, we will need
2167 * to transmit that many packets twice within this interval.
2168 */
2169 packets_remaining = 2 * packets_remaining +
2170 bw_table->interval_bw[i].num_packets;
2171
2172 /* Find the largest max packet size of this or the previous
2173 * interval.
2174 */
2175 if (list_empty(&bw_table->interval_bw[i].endpoints))
2176 largest_mps = 0;
2177 else {
2178 struct xhci_virt_ep *virt_ep;
2179 struct list_head *ep_entry;
2180
2181 ep_entry = bw_table->interval_bw[i].endpoints.next;
2182 virt_ep = list_entry(ep_entry,
2183 struct xhci_virt_ep, bw_endpoint_list);
2184 /* Convert to blocks, rounding up */
2185 largest_mps = DIV_ROUND_UP(
2186 virt_ep->bw_info.max_packet_size,
2187 block_size);
2188 }
2189 if (largest_mps > packet_size)
2190 packet_size = largest_mps;
2191
2192 /* Use the larger overhead of this or the previous interval. */
2193 interval_overhead = xhci_get_largest_overhead(
2194 &bw_table->interval_bw[i]);
2195 if (interval_overhead > overhead)
2196 overhead = interval_overhead;
2197
2198 /* How many packets can we evenly distribute across
2199 * (1 << (i + 1)) possible scheduling opportunities?
2200 */
2201 packets_transmitted = packets_remaining >> (i + 1);
2202
2203 /* Add in the bandwidth used for those scheduled packets */
2204 bw_added = packets_transmitted * (overhead + packet_size);
2205
2206 /* How many packets do we have remaining to transmit? */
2207 packets_remaining = packets_remaining % (1 << (i + 1));
2208
2209 /* What largest max packet size should those packets have? */
2210 /* If we've transmitted all packets, don't carry over the
2211 * largest packet size.
2212 */
2213 if (packets_remaining == 0) {
2214 packet_size = 0;
2215 overhead = 0;
2216 } else if (packets_transmitted > 0) {
2217 /* Otherwise if we do have remaining packets, and we've
2218 * scheduled some packets in this interval, take the
2219 * largest max packet size from endpoints with this
2220 * interval.
2221 */
2222 packet_size = largest_mps;
2223 overhead = interval_overhead;
2224 }
2225 /* Otherwise carry over packet_size and overhead from the last
2226 * time we had a remainder.
2227 */
2228 bw_used += bw_added;
2229 if (bw_used > max_bandwidth) {
2230 xhci_warn(xhci, "Not enough bandwidth. "
2231 "Proposed: %u, Max: %u\n",
2232 bw_used, max_bandwidth);
2233 return -ENOMEM;
2234 }
2235 }
2236 /*
2237 * Ok, we know we have some packets left over after even-handedly
2238 * scheduling interval 15. We don't know which microframes they will
2239 * fit into, so we over-schedule and say they will be scheduled every
2240 * microframe.
2241 */
2242 if (packets_remaining > 0)
2243 bw_used += overhead + packet_size;
2244
2245 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2246 unsigned int port_index = virt_dev->real_port - 1;
2247
2248 /* OK, we're manipulating a HS device attached to a
2249 * root port bandwidth domain. Include the number of active TTs
2250 * in the bandwidth used.
2251 */
2252 bw_used += TT_HS_OVERHEAD *
2253 xhci->rh_bw[port_index].num_active_tts;
2254 }
2255
2256 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2257 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2258 "Available: %u " "percent",
2259 bw_used, max_bandwidth, bw_reserved,
2260 (max_bandwidth - bw_used - bw_reserved) * 100 /
2261 max_bandwidth);
2262
2263 bw_used += bw_reserved;
2264 if (bw_used > max_bandwidth) {
2265 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2266 bw_used, max_bandwidth);
2267 return -ENOMEM;
2268 }
2269
2270 bw_table->bw_used = bw_used;
2271 return 0;
2272}
2273
2274static bool xhci_is_async_ep(unsigned int ep_type)
2275{
2276 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2277 ep_type != ISOC_IN_EP &&
2278 ep_type != INT_IN_EP);
2279}
2280
2281static bool xhci_is_sync_in_ep(unsigned int ep_type)
2282{
2283 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2284}
2285
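/*
 * Returns the SuperSpeed periodic bus time, in blocks per ESIT, that an
 * endpoint consumes. Illustrative numbers, assuming SS_BLOCK is 16 bytes
 * (see xhci.h): max_packet_size = 1024 rounds up to mps = 64 blocks, so an
 * isoc endpoint with mult = 1, num_packets = 2 and ep_interval = 0 costs
 * SS_OVERHEAD_BURST + 2 * (SS_OVERHEAD + 64) blocks.
 */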
2286static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2287{
2288 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2289
2290 if (ep_bw->ep_interval == 0)
2291 return SS_OVERHEAD_BURST +
2292 (ep_bw->mult * ep_bw->num_packets *
2293 (SS_OVERHEAD + mps));
2294 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2295 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2296 1 << ep_bw->ep_interval);
}
2299
2300static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2301 struct xhci_bw_info *ep_bw,
2302 struct xhci_interval_bw_table *bw_table,
2303 struct usb_device *udev,
2304 struct xhci_virt_ep *virt_ep,
2305 struct xhci_tt_bw_info *tt_info)
2306{
2307 struct xhci_interval_bw *interval_bw;
2308 int normalized_interval;
2309
2310 if (xhci_is_async_ep(ep_bw->type))
2311 return;
2312
2313 if (udev->speed >= USB_SPEED_SUPER) {
2314 if (xhci_is_sync_in_ep(ep_bw->type))
2315 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2316 xhci_get_ss_bw_consumed(ep_bw);
2317 else
2318 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2319 xhci_get_ss_bw_consumed(ep_bw);
2320 return;
2321 }
2322
2323 /* SuperSpeed endpoints never get added to intervals in the table, so
2324 * this check is only valid for HS/FS/LS devices.
2325 */
2326 if (list_empty(&virt_ep->bw_endpoint_list))
2327 return;
2328 /* For LS/FS devices, we need to translate the interval expressed in
2329 * microframes to frames.
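	 * Example: an FS interrupt endpoint with ep_interval 3 (2^3 = 8
	 * microframes, i.e. 1 ms) lands in interval index 0, the per-frame
	 * bucket, while ep_interval 6 (8 ms) lands in index 3.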
2330 */
2331 if (udev->speed == USB_SPEED_HIGH)
2332 normalized_interval = ep_bw->ep_interval;
2333 else
2334 normalized_interval = ep_bw->ep_interval - 3;
2335
2336 if (normalized_interval == 0)
2337 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2338 interval_bw = &bw_table->interval_bw[normalized_interval];
2339 interval_bw->num_packets -= ep_bw->num_packets;
2340 switch (udev->speed) {
2341 case USB_SPEED_LOW:
2342 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2343 break;
2344 case USB_SPEED_FULL:
2345 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2346 break;
2347 case USB_SPEED_HIGH:
2348 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2349 break;
2350 case USB_SPEED_SUPER:
2351 case USB_SPEED_SUPER_PLUS:
2352 case USB_SPEED_UNKNOWN:
2353 case USB_SPEED_WIRELESS:
2354 /* Should never happen because only LS/FS/HS endpoints will get
2355 * added to the endpoint list.
2356 */
2357 return;
2358 }
2359 if (tt_info)
2360 tt_info->active_eps -= 1;
2361 list_del_init(&virt_ep->bw_endpoint_list);
2362}
2363
2364static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2365 struct xhci_bw_info *ep_bw,
2366 struct xhci_interval_bw_table *bw_table,
2367 struct usb_device *udev,
2368 struct xhci_virt_ep *virt_ep,
2369 struct xhci_tt_bw_info *tt_info)
2370{
2371 struct xhci_interval_bw *interval_bw;
2372 struct xhci_virt_ep *smaller_ep;
2373 int normalized_interval;
2374
2375 if (xhci_is_async_ep(ep_bw->type))
2376 return;
2377
	if (udev->speed >= USB_SPEED_SUPER) {
2379 if (xhci_is_sync_in_ep(ep_bw->type))
2380 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2381 xhci_get_ss_bw_consumed(ep_bw);
2382 else
2383 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2384 xhci_get_ss_bw_consumed(ep_bw);
2385 return;
2386 }
2387
2388 /* For LS/FS devices, we need to translate the interval expressed in
2389 * microframes to frames.
2390 */
2391 if (udev->speed == USB_SPEED_HIGH)
2392 normalized_interval = ep_bw->ep_interval;
2393 else
2394 normalized_interval = ep_bw->ep_interval - 3;
2395
2396 if (normalized_interval == 0)
2397 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2398 interval_bw = &bw_table->interval_bw[normalized_interval];
2399 interval_bw->num_packets += ep_bw->num_packets;
2400 switch (udev->speed) {
2401 case USB_SPEED_LOW:
2402 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2403 break;
2404 case USB_SPEED_FULL:
2405 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2406 break;
2407 case USB_SPEED_HIGH:
2408 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2409 break;
2410 case USB_SPEED_SUPER:
2411 case USB_SPEED_SUPER_PLUS:
2412 case USB_SPEED_UNKNOWN:
2413 case USB_SPEED_WIRELESS:
2414 /* Should never happen because only LS/FS/HS endpoints will get
2415 * added to the endpoint list.
2416 */
2417 return;
2418 }
2419
2420 if (tt_info)
2421 tt_info->active_eps += 1;
2422 /* Insert the endpoint into the list, largest max packet size first. */
2423 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2424 bw_endpoint_list) {
2425 if (ep_bw->max_packet_size >=
2426 smaller_ep->bw_info.max_packet_size) {
2427 /* Add the new ep before the smaller endpoint */
2428 list_add_tail(&virt_ep->bw_endpoint_list,
2429 &smaller_ep->bw_endpoint_list);
2430 return;
2431 }
2432 }
2433 /* Add the new endpoint at the end of the list. */
2434 list_add_tail(&virt_ep->bw_endpoint_list,
2435 &interval_bw->endpoints);
2436}
2437
2438void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2439 struct xhci_virt_device *virt_dev,
2440 int old_active_eps)
2441{
	struct xhci_root_port_bw_info *rh_bw_info;

	if (!virt_dev->tt_info)
2444 return;
2445
2446 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2447 if (old_active_eps == 0 &&
2448 virt_dev->tt_info->active_eps != 0) {
2449 rh_bw_info->num_active_tts += 1;
2450 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2451 } else if (old_active_eps != 0 &&
2452 virt_dev->tt_info->active_eps == 0) {
2453 rh_bw_info->num_active_tts -= 1;
2454 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2455 }
2456}
2457
2458static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2459 struct xhci_virt_device *virt_dev,
2460 struct xhci_container_ctx *in_ctx)
2461{
2462 struct xhci_bw_info ep_bw_info[31];
2463 int i;
2464 struct xhci_input_control_ctx *ctrl_ctx;
2465 int old_active_eps = 0;
2466
2467 if (virt_dev->tt_info)
2468 old_active_eps = virt_dev->tt_info->active_eps;
2469
2470 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2471 if (!ctrl_ctx) {
2472 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2473 __func__);
2474 return -ENOMEM;
2475 }
2476
2477 for (i = 0; i < 31; i++) {
2478 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2479 continue;
2480
2481 /* Make a copy of the BW info in case we need to revert this */
2482 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2483 sizeof(ep_bw_info[i]));
2484 /* Drop the endpoint from the interval table if the endpoint is
2485 * being dropped or changed.
2486 */
2487 if (EP_IS_DROPPED(ctrl_ctx, i))
2488 xhci_drop_ep_from_interval_table(xhci,
2489 &virt_dev->eps[i].bw_info,
2490 virt_dev->bw_table,
2491 virt_dev->udev,
2492 &virt_dev->eps[i],
2493 virt_dev->tt_info);
2494 }
2495 /* Overwrite the information stored in the endpoints' bw_info */
2496 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2497 for (i = 0; i < 31; i++) {
2498 /* Add any changed or added endpoints to the interval table */
2499 if (EP_IS_ADDED(ctrl_ctx, i))
2500 xhci_add_ep_to_interval_table(xhci,
2501 &virt_dev->eps[i].bw_info,
2502 virt_dev->bw_table,
2503 virt_dev->udev,
2504 &virt_dev->eps[i],
2505 virt_dev->tt_info);
2506 }
2507
2508 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2509 /* Ok, this fits in the bandwidth we have.
2510 * Update the number of active TTs.
2511 */
2512 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2513 return 0;
2514 }
2515
2516 /* We don't have enough bandwidth for this, revert the stored info. */
2517 for (i = 0; i < 31; i++) {
2518 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2519 continue;
2520
2521 /* Drop the new copies of any added or changed endpoints from
2522 * the interval table.
2523 */
2524 if (EP_IS_ADDED(ctrl_ctx, i)) {
2525 xhci_drop_ep_from_interval_table(xhci,
2526 &virt_dev->eps[i].bw_info,
2527 virt_dev->bw_table,
2528 virt_dev->udev,
2529 &virt_dev->eps[i],
2530 virt_dev->tt_info);
2531 }
2532 /* Revert the endpoint back to its old information */
2533 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2534 sizeof(ep_bw_info[i]));
2535 /* Add any changed or dropped endpoints back into the table */
2536 if (EP_IS_DROPPED(ctrl_ctx, i))
2537 xhci_add_ep_to_interval_table(xhci,
2538 &virt_dev->eps[i].bw_info,
2539 virt_dev->bw_table,
2540 virt_dev->udev,
2541 &virt_dev->eps[i],
2542 virt_dev->tt_info);
2543 }
2544 return -ENOMEM;
2545}
2546
2548/* Issue a configure endpoint command or evaluate context command
2549 * and wait for it to finish.
2550 */
2551static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2552 struct usb_device *udev,
2553 struct xhci_command *command,
2554 bool ctx_change, bool must_succeed)
2555{
2556 int ret;
2557 unsigned long flags;
2558 struct xhci_input_control_ctx *ctrl_ctx;
2559 struct xhci_virt_device *virt_dev;
2560 struct xhci_slot_ctx *slot_ctx;
2561
2562 if (!command)
2563 return -EINVAL;
2564
2565 spin_lock_irqsave(&xhci->lock, flags);
2566
2567 if (xhci->xhc_state & XHCI_STATE_DYING) {
2568 spin_unlock_irqrestore(&xhci->lock, flags);
2569 return -ESHUTDOWN;
2570 }
2571
2572 virt_dev = xhci->devs[udev->slot_id];
2573
2574 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2575 if (!ctrl_ctx) {
2576 spin_unlock_irqrestore(&xhci->lock, flags);
2577 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2578 __func__);
2579 return -ENOMEM;
2580 }
2581
2582 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2583 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2584 spin_unlock_irqrestore(&xhci->lock, flags);
2585 xhci_warn(xhci, "Not enough host resources, "
2586 "active endpoint contexts = %u\n",
2587 xhci->num_active_eps);
2588 return -ENOMEM;
2589 }
2590 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2591 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2592 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2593 xhci_free_host_resources(xhci, ctrl_ctx);
2594 spin_unlock_irqrestore(&xhci->lock, flags);
2595 xhci_warn(xhci, "Not enough bandwidth\n");
2596 return -ENOMEM;
2597 }
2598
2599 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2600 trace_xhci_configure_endpoint(slot_ctx);
2601
2602 if (!ctx_change)
2603 ret = xhci_queue_configure_endpoint(xhci, command,
2604 command->in_ctx->dma,
2605 udev->slot_id, must_succeed);
2606 else
2607 ret = xhci_queue_evaluate_context(xhci, command,
2608 command->in_ctx->dma,
2609 udev->slot_id, must_succeed);
2610 if (ret < 0) {
2611 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2612 xhci_free_host_resources(xhci, ctrl_ctx);
2613 spin_unlock_irqrestore(&xhci->lock, flags);
2614 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2615 "FIXME allocate a new ring segment");
2616 return -ENOMEM;
2617 }
2618 xhci_ring_cmd_db(xhci);
2619 spin_unlock_irqrestore(&xhci->lock, flags);
2620
2621 /* Wait for the configure endpoint command to complete */
2622 wait_for_completion(command->completion);
2623
2624 if (!ctx_change)
2625 ret = xhci_configure_endpoint_result(xhci, udev,
2626 &command->status);
2627 else
2628 ret = xhci_evaluate_context_result(xhci, udev,
2629 &command->status);
2630
2631 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2632 spin_lock_irqsave(&xhci->lock, flags);
2633 /* If the command failed, remove the reserved resources.
2634 * Otherwise, clean up the estimate to include dropped eps.
2635 */
2636 if (ret)
2637 xhci_free_host_resources(xhci, ctrl_ctx);
2638 else
2639 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2640 spin_unlock_irqrestore(&xhci->lock, flags);
2641 }
2642 return ret;
2643}
2644
2645static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2646 struct xhci_virt_device *vdev, int i)
2647{
2648 struct xhci_virt_ep *ep = &vdev->eps[i];
2649
2650 if (ep->ep_state & EP_HAS_STREAMS) {
2651 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2652 xhci_get_endpoint_address(i));
2653 xhci_free_stream_info(xhci, ep->stream_info);
2654 ep->stream_info = NULL;
2655 ep->ep_state &= ~EP_HAS_STREAMS;
2656 }
2657}
2658
2659/* Called after one or more calls to xhci_add_endpoint() or
2660 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2661 * to call xhci_reset_bandwidth().
2662 *
2663 * Since we are in the middle of changing either configuration or
2664 * installing a new alt setting, the USB core won't allow URBs to be
2665 * enqueued for any endpoint on the old config or interface. Nothing
2666 * else should be touching the xhci->devs[slot_id] structure, so we
2667 * don't need to take the xhci->lock for manipulating that.
2668 */
2669static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2670{
2671 int i;
2672 int ret = 0;
2673 struct xhci_hcd *xhci;
2674 struct xhci_virt_device *virt_dev;
2675 struct xhci_input_control_ctx *ctrl_ctx;
2676 struct xhci_slot_ctx *slot_ctx;
2677 struct xhci_command *command;
2678
2679 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2680 if (ret <= 0)
2681 return ret;
2682 xhci = hcd_to_xhci(hcd);
2683 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
2684 (xhci->xhc_state & XHCI_STATE_REMOVING))
2685 return -ENODEV;
2686
2687 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2688 virt_dev = xhci->devs[udev->slot_id];
2689
2690 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
2691 if (!command)
2692 return -ENOMEM;
2693
2694 command->in_ctx = virt_dev->in_ctx;
2695
2696 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2697 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2698 if (!ctrl_ctx) {
2699 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2700 __func__);
2701 ret = -ENOMEM;
2702 goto command_cleanup;
2703 }
2704 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2705 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2706 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2707
	/* Don't issue the command if there are no endpoints to update. */
2709 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2710 ctrl_ctx->drop_flags == 0) {
2711 ret = 0;
2712 goto command_cleanup;
2713 }
2714 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
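	/*
	 * Example (illustrative): if the last context left in use is the one
	 * at flag bit 5 (EP index 4), Context Entries becomes LAST_CTX(5) and
	 * the xHC parses the slot context plus endpoint contexts 1-5.
	 */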
2715 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2716 for (i = 31; i >= 1; i--) {
2717 __le32 le32 = cpu_to_le32(BIT(i));
2718
2719 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
2720 || (ctrl_ctx->add_flags & le32) || i == 1) {
2721 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2722 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
2723 break;
2724 }
2725 }
2726
2727 ret = xhci_configure_endpoint(xhci, udev, command,
2728 false, false);
2729 if (ret)
		/* Caller should call reset_bandwidth() */
2731 goto command_cleanup;
2732
2733 /* Free any rings that were dropped, but not changed. */
2734 for (i = 1; i < 31; i++) {
2735 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2736 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
2737 xhci_free_endpoint_ring(xhci, virt_dev, i);
2738 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2739 }
2740 }
2741 xhci_zero_in_ctx(xhci, virt_dev);
2742 /*
2743 * Install any rings for completely new endpoints or changed endpoints,
2744 * and free any old rings from changed endpoints.
2745 */
2746 for (i = 1; i < 31; i++) {
2747 if (!virt_dev->eps[i].new_ring)
2748 continue;
2749 /* Only free the old ring if it exists.
2750 * It may not if this is the first add of an endpoint.
2751 */
		if (virt_dev->eps[i].ring)
			xhci_free_endpoint_ring(xhci, virt_dev, i);
2755 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
2756 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2757 virt_dev->eps[i].new_ring = NULL;
2758 }
2759command_cleanup:
2760 kfree(command->completion);
2761 kfree(command);
2762
2763 return ret;
2764}
2765
2766static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2767{
2768 struct xhci_hcd *xhci;
2769 struct xhci_virt_device *virt_dev;
2770 int i, ret;
2771
2772 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2773 if (ret <= 0)
2774 return;
2775 xhci = hcd_to_xhci(hcd);
2776
2777 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2778 virt_dev = xhci->devs[udev->slot_id];
2779 /* Free any rings allocated for added endpoints */
2780 for (i = 0; i < 31; i++) {
2781 if (virt_dev->eps[i].new_ring) {
2782 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
2783 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2784 virt_dev->eps[i].new_ring = NULL;
2785 }
2786 }
2787 xhci_zero_in_ctx(xhci, virt_dev);
2788}
2789
2790static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2791 struct xhci_container_ctx *in_ctx,
2792 struct xhci_container_ctx *out_ctx,
2793 struct xhci_input_control_ctx *ctrl_ctx,
2794 u32 add_flags, u32 drop_flags)
2795{
2796 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2797 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2798 xhci_slot_copy(xhci, in_ctx, out_ctx);
2799 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2800}
2801
2802static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2803 unsigned int slot_id, unsigned int ep_index,
2804 struct xhci_dequeue_state *deq_state)
2805{
2806 struct xhci_input_control_ctx *ctrl_ctx;
2807 struct xhci_container_ctx *in_ctx;
2808 struct xhci_ep_ctx *ep_ctx;
2809 u32 added_ctxs;
2810 dma_addr_t addr;
2811
2812 in_ctx = xhci->devs[slot_id]->in_ctx;
2813 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2814 if (!ctrl_ctx) {
2815 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2816 __func__);
2817 return;
2818 }
2819
2820 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2821 xhci->devs[slot_id]->out_ctx, ep_index);
2822 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2823 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2824 deq_state->new_deq_ptr);
2825 if (addr == 0) {
2826 xhci_warn(xhci, "WARN Cannot submit config ep after "
2827 "reset ep command\n");
2828 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2829 deq_state->new_deq_seg,
2830 deq_state->new_deq_ptr);
2831 return;
2832 }
2833 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2834
2835 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2836 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2837 xhci->devs[slot_id]->out_ctx, ctrl_ctx,
2838 added_ctxs, added_ctxs);
2839}
2840
2841void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
2842 unsigned int stream_id, struct xhci_td *td)
2843{
2844 struct xhci_dequeue_state deq_state;
2845 struct usb_device *udev = td->urb->dev;
2846
2847 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2848 "Cleaning up stalled endpoint ring");
2849 /* We need to move the HW's dequeue pointer past this TD,
2850 * or it will attempt to resend it on the next doorbell ring.
2851 */
2852 xhci_find_new_dequeue_state(xhci, udev->slot_id,
2853 ep_index, stream_id, td, &deq_state);
2854
2855 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
2856 return;
2857
2858 /* HW with the reset endpoint quirk will use the saved dequeue state to
2859 * issue a configure endpoint command later.
2860 */
2861 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2862 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2863 "Queueing new dequeue state");
2864 xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2865 ep_index, &deq_state);
2866 } else {
2867 /* Better hope no one uses the input context between now and the
2868 * reset endpoint completion!
2869 * XXX: No idea how this hardware will react when stream rings
2870 * are enabled.
2871 */
2872 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2873 "Setting up input context for "
2874 "configure endpoint command");
2875 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2876 ep_index, &deq_state);
2877 }
2878}
2879
2880/*
2881 * Called after usb core issues a clear halt control message.
2882 * The host side of the halt should already be cleared by a reset endpoint
2883 * command issued when the STALL event was received.
2884 *
2885 * The reset endpoint command may only be issued to endpoints in the halted
2886 * state. For software that wishes to reset the data toggle or sequence number
2887 * of an endpoint that isn't in the halted state this function will issue a
2888 * configure endpoint command with the Drop and Add bits set for the target
 * endpoint. Refer to the additional note in the xHCI specification, section 4.6.8.
2890 */
2891
2892static void xhci_endpoint_reset(struct usb_hcd *hcd,
2893 struct usb_host_endpoint *host_ep)
2894{
2895 struct xhci_hcd *xhci;
2896 struct usb_device *udev;
2897 struct xhci_virt_device *vdev;
2898 struct xhci_virt_ep *ep;
2899 struct xhci_input_control_ctx *ctrl_ctx;
2900 struct xhci_command *stop_cmd, *cfg_cmd;
2901 unsigned int ep_index;
2902 unsigned long flags;
2903 u32 ep_flag;
2904
2905 xhci = hcd_to_xhci(hcd);
2906 if (!host_ep->hcpriv)
2907 return;
2908 udev = (struct usb_device *) host_ep->hcpriv;
2909 vdev = xhci->devs[udev->slot_id];
2910 ep_index = xhci_get_endpoint_index(&host_ep->desc);
2911 ep = &vdev->eps[ep_index];
2912
	/* Bail out if toggle is already being cleared by an endpoint reset */
2914 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
2915 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
2916 return;
2917 }
	/* Only interrupt and bulk endpoints use a data toggle, see USB 2.0 spec 5.5.4 */
2919 if (usb_endpoint_xfer_control(&host_ep->desc) ||
2920 usb_endpoint_xfer_isoc(&host_ep->desc))
2921 return;
2922
2923 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
2924
2925 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
2926 return;
2927
2928 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
2929 if (!stop_cmd)
2930 return;
2931
2932 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
2933 if (!cfg_cmd)
2934 goto cleanup;
2935
2936 spin_lock_irqsave(&xhci->lock, flags);
2937
2938 /* block queuing new trbs and ringing ep doorbell */
2939 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
2940
2941 /*
2942 * Make sure endpoint ring is empty before resetting the toggle/seq.
	 * The driver is required to synchronously cancel all transfer requests.
2944 * Stop the endpoint to force xHC to update the output context
2945 */
2946
2947 if (!list_empty(&ep->ring->td_list)) {
2948 dev_err(&udev->dev, "EP not empty, refuse reset\n");
2949 spin_unlock_irqrestore(&xhci->lock, flags);
2950 goto cleanup;
2951 }
2952 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
2953 xhci_ring_cmd_db(xhci);
2954 spin_unlock_irqrestore(&xhci->lock, flags);
2955
2956 wait_for_completion(stop_cmd->completion);
2957
2958 spin_lock_irqsave(&xhci->lock, flags);
2959
2960 /* config ep command clears toggle if add and drop ep flags are set */
2961 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
2962 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
2963 ctrl_ctx, ep_flag, ep_flag);
2964 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
2965
2966 xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
2967 udev->slot_id, false);
2968 xhci_ring_cmd_db(xhci);
2969 spin_unlock_irqrestore(&xhci->lock, flags);
2970
2971 wait_for_completion(cfg_cmd->completion);
2972
2973 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
2974 xhci_free_command(xhci, cfg_cmd);
2975cleanup:
2976 xhci_free_command(xhci, stop_cmd);
2977}
2978
2979static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2980 struct usb_device *udev, struct usb_host_endpoint *ep,
2981 unsigned int slot_id)
2982{
2983 int ret;
2984 unsigned int ep_index;
2985 unsigned int ep_state;
2986
2987 if (!ep)
2988 return -EINVAL;
2989 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2990 if (ret <= 0)
2991 return -EINVAL;
2992 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
2993 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2994 " descriptor for ep 0x%x does not support streams\n",
2995 ep->desc.bEndpointAddress);
2996 return -EINVAL;
2997 }
2998
2999 ep_index = xhci_get_endpoint_index(&ep->desc);
3000 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3001 if (ep_state & EP_HAS_STREAMS ||
3002 ep_state & EP_GETTING_STREAMS) {
3003 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3004 "already has streams set up.\n",
3005 ep->desc.bEndpointAddress);
3006 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3007 "dynamic stream context array reallocation.\n");
3008 return -EINVAL;
3009 }
3010 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3011 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3012 "endpoint 0x%x; URBs are pending.\n",
3013 ep->desc.bEndpointAddress);
3014 return -EINVAL;
3015 }
3016 return 0;
3017}
3018
3019static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3020 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3021{
3022 unsigned int max_streams;
3023
3024 /* The stream context array size must be a power of two */
3025 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3026 /*
3027 * Find out how many primary stream array entries the host controller
3028 * supports. Later we may use secondary stream arrays (similar to 2nd
3029 * level page entries), but that's an optional feature for xHCI host
3030 * controllers. xHCs must support at least 4 stream IDs.
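	 *
	 * Example: a request for 6 streams rounds up to an 8-entry context
	 * array; if HCC_MAX_PSA() reports only 4 supported entries, both the
	 * array size and the usable stream count are clamped to 4.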
3031 */
3032 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3033 if (*num_stream_ctxs > max_streams) {
3034 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3035 max_streams);
3036 *num_stream_ctxs = max_streams;
3037 *num_streams = max_streams;
3038 }
3039}
3040
/* Returns an error code if one of the endpoints already has streams.
3042 * This does not change any data structures, it only checks and gathers
3043 * information.
3044 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
 */
static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned int max_packet;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	/* MaxPSASize value 0 (2 streams) means streams are not supported */
	if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
			HCC_MAX_PSA(xhci->hcc_params) < 4) {
		xhci_dbg(xhci, "xHCI controller does not support streams.\n");
		return -ENOSYS;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		max_packet = usb_endpoint_maxp(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams,
				max_packet, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
			 udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}
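
/*
 * Usage sketch (illustrative): a class driver such as UAS reaches this
 * routine through the HCD interface, roughly:
 *
 *	num = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *
 * and falls back to non-stream operation if num <= 0.  Note that the
 * returned count may be smaller than requested, per the clamping in
 * xhci_calculate_streams_entries() above.
 */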

/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -EINVAL;
	}

	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, ctrl_ctx,
			changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to the normal endpoint ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}
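
/*
 * Illustrative counterpart to the usage sketch after xhci_alloc_streams():
 * the same driver undoes the setup with usb_free_streams(intf, eps,
 * num_eps, GFP_NOIO), passing the same endpoint group it used when it
 * allocated the streams.
 */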

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
	struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to the xHC restore error and
 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
		struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->tt_info)
		old_active_eps = virt_dev->tt_info->active_eps;

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
						SLOT_STATE_DISABLED)
		return 0;

	trace_xhci_discover_or_reset_device(slot_ctx);

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	wait_for_completion(reset_device_cmd->completion);

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout waiting for reset device command\n");
		ret = -ETIME;
		goto command_cleanup;
	case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
	case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
		xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_dbg(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Free up host controller endpoint resources */
	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		/* Don't delete the default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}

	/* Everything but endpoint 0 is disabled, so free the rings. */
	for (i = 1; i < 31; i++) {
		struct xhci_virt_ep *ep = &virt_dev->eps[i];

		if (ep->ep_state & EP_HAS_STREAMS) {
			xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
					xhci_get_endpoint_address(i));
			xhci_free_stream_info(xhci, ep->stream_info);
			ep->stream_info = NULL;
			ep->ep_state &= ~EP_HAS_STREAMS;
		}

		if (ep->ring) {
			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
			xhci_free_endpoint_ring(xhci, virt_dev, i);
		}
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
			xhci_drop_ep_from_interval_table(xhci,
					&virt_dev->eps[i].bw_info,
					virt_dev->bw_table,
					udev,
					&virt_dev->eps[i],
					virt_dev->tt_info);
		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
	}
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;
	int i, ret;

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * We called pm_runtime_get_noresume when the device was attached.
	 * Decrement the counter here to allow controller to runtime suspend
	 * if no devices remain.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_put_noidle(hcd->self.controller);
#endif

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	/* If the host is halted due to driver unload, we still need to free the
	 * device.
	 */
	if (ret <= 0 && ret != -ENODEV)
		return;

	virt_dev = xhci->devs[udev->slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_free_dev(slot_ctx);

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; i++) {
		virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}
	xhci_debugfs_remove_slot(xhci, udev->slot_id);
	virt_dev->udev = NULL;
	ret = xhci_disable_slot(xhci, udev->slot_id);
	if (ret)
		xhci_free_virt_device(xhci, udev->slot_id);
}

int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
{
	struct xhci_command *command;
	unsigned long flags;
	u32 state;
	int ret = 0;

	command = xhci_alloc_command(xhci, false, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = readl(&xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return -ENODEV;
	}

	ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
				slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(command);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Checks if we have enough host controller resources for the default control
 * endpoint.
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Not enough ep ctxs: "
				"%u active, need to add 1, limit is %u.",
				xhci->num_active_eps, xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += 1;
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Adding 1 ep ctx, %u now active.",
			xhci->num_active_eps);
	return 0;
}


/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret, slot_id;
	struct xhci_command *command;

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command)
		return 0;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		xhci_free_command(xhci, command);
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(command->completion);
	slot_id = command->slot_id;

	if (!slot_id || command->status != COMP_SUCCESS) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
				HCS_MAX_SLOTS(
					readl(&xhci->cap_regs->hcs_params1)));
		xhci_free_command(xhci, command);
		return 0;
	}

	xhci_free_command(xhci, command);

	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
		spin_lock_irqsave(&xhci->lock, flags);
		ret = xhci_reserve_host_control_ep_resources(xhci);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			xhci_warn(xhci, "Not enough host resources, "
					"active endpoint contexts = %u\n",
					xhci->num_active_eps);
			goto disable_slot;
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	/* Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		goto disable_slot;
	}
	vdev = xhci->devs[slot_id];
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_alloc_dev(slot_ctx);

	udev->slot_id = slot_id;

	xhci_debugfs_create_slot(xhci, slot_id);

#ifndef CONFIG_USB_DEFAULT_PERSIST
	/*
	 * If resetting upon resume, we can't put the controller into runtime
	 * suspend if there is a device attached.
	 */
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		pm_runtime_get_noresume(hcd->self.controller);
#endif

	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;

disable_slot:
	ret = xhci_disable_slot(xhci, udev->slot_id);
	if (ret)
		xhci_free_virt_device(xhci, udev->slot_id);

	return 0;
}

/*
 * Issue an Address Device command and optionally send a corresponding
 * SetAddress request to the device.
 */
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
			     enum xhci_setup_dev setup)
{
	const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
	unsigned long flags;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
	struct xhci_command *command = NULL;

	mutex_lock(&xhci->mutex);

	if (xhci->xhc_state) {	/* dying, removing or halted */
		ret = -ESHUTDOWN;
		goto out;
	}

	if (!udev->slot_id) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"Bad Slot ID %d", udev->slot_id);
		ret = -EINVAL;
		goto out;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture tests with an NEC controller, a NULL
		 * pointer dereference was once observed because virt_dev was
		 * NULL.  Print useful debug rather than crash if it is
		 * observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
			udev->slot_id);
		ret = -EINVAL;
		goto out;
	}
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_setup_device_slot(slot_ctx);

	if (setup == SETUP_CONTEXT_ONLY) {
		if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
		    SLOT_STATE_DEFAULT) {
			xhci_dbg(xhci, "Slot already in default state\n");
			goto out;
		}
	}

	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!command) {
		ret = -ENOMEM;
		goto out;
	}

	command->in_ctx = virt_dev->in_ctx;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		ret = -EINVAL;
		goto out;
	}
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);

	spin_lock_irqsave(&xhci->lock, flags);
	trace_xhci_setup_device(virt_dev);
	ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
					udev->slot_id, setup);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
				"FIXME: allocate a command ring segment");
		goto out;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	wait_for_completion(command->completion);

	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	switch (command->status) {
	case COMP_COMMAND_ABORTED:
	case COMP_COMMAND_RING_STOPPED:
		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
		ret = -ETIME;
		break;
	case COMP_CONTEXT_STATE_ERROR:
	case COMP_SLOT_NOT_ENABLED_ERROR:
		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
			 act, udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_USB_TRANSACTION_ERROR:
		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);

		mutex_unlock(&xhci->mutex);
		ret = xhci_disable_slot(xhci, udev->slot_id);
		if (!ret)
			xhci_alloc_dev(hcd, udev);
		kfree(command->completion);
		kfree(command);
		return -EPROTO;
	case COMP_INCOMPATIBLE_DEVICE_ERROR:
		dev_warn(&udev->dev,
			 "ERROR: Incompatible device for setup %s command\n", act);
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			       "Successful setup %s command", act);
		break;
	default:
		xhci_err(xhci,
			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
			 act, command->status);
		trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	}
	if (ret)
		goto out;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Op regs DCBAA ptr = %#016llx", temp_64);
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		"Slot ID %d dcbaa entry @%p = %#016llx",
		udev->slot_id,
		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
		(unsigned long long)
		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
			"Output Context DMA address = %#08llx",
			(unsigned long long)virt_dev->out_ctx->dma);
	trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
				le32_to_cpu(slot_ctx->dev_info) >> 27);
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
		       "Internal device address = %d",
		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
out:
	mutex_unlock(&xhci->mutex);
	if (command) {
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}

static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
}

/*
 * Translate the port index into the raw index in the HW port status
 * registers.  Calculate the offset between the port's PORTSC register
 * and the port status base, then divide by the number of per-port
 * registers to get the real index.  The raw port number is 1-based.
 */
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
	__le32 __iomem *addr;
	int raw_port;

	if (hcd->speed < HCD_USB3)
		addr = xhci->usb2_ports[port1 - 1];
	else
		addr = xhci->usb3_ports[port1 - 1];

	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
	return raw_port;
}
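
/*
 * Worked example (illustrative): with NUM_PORT_REGS == 4 (PORTSC, PORTPMSC,
 * PORTLI and PORTHLPMC per port), a PORTSC register located 8 32-bit
 * registers past port_status_base maps to raw port (8 / 4) + 1 = 3.
 */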

/*
 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
 * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
 */
static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
			struct usb_device *udev, u16 max_exit_latency)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_command *command;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);

	virt_dev = xhci->devs[udev->slot_id];

	/*
	 * virt_dev might not exist yet if the xHC resumed from hibernate (S4)
	 * and was re-initialized.  Exit latency will be set later, after
	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated.
	 */

	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}

	/* Attempt to issue an Evaluate Context command to change the MEL. */
	command = xhci->lpm_command;
	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
	if (!ctrl_ctx) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
	slot_ctx->dev_state = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Set up evaluate context for LPM MEL change.");

	/* Issue and wait for the evaluate context command. */
	ret = xhci_configure_endpoint(xhci, udev, command,
			true, true);

	if (!ret) {
		spin_lock_irqsave(&xhci->lock, flags);
		virt_dev->current_mel = max_exit_latency;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
	return ret;
}

#ifdef CONFIG_PM

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
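
/*
 * Illustrative reading of the table: a BESL index of 3 encodes an exit
 * latency of 300 us, and the maximum index of 15 encodes 10000 us (10 ms).
 */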

/* Calculate HIRD/BESL for USB2 PORTPMSC */
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
					struct usb_device *udev)
{
	int u2del, besl, besl_host;
	int besl_device = 0;
	u32 field;

	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	if (field & USB_BESL_SUPPORT) {
		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)
				break;
		}
		/* Use baseline BESL value as default */
		if (field & USB_BESL_BASELINE_VALID)
			besl_device = USB_GET_BESL_BASELINE(field);
		else if (field & USB_BESL_DEEP_VALID)
			besl_device = USB_GET_BESL_DEEP(field);
	} else {
		if (u2del <= 50)
			besl_host = 0;
		else
			besl_host = (u2del - 51) / 75 + 1;
	}

	besl = besl_host + besl_device;
	if (besl > 15)
		besl = 15;

	return besl;
}
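
/*
 * Worked example (illustrative): with BESL support and u2del = 300 us, the
 * loop above stops at besl_host = 3, the first table entry >= 300.  A device
 * baseline BESL of 4 then gives besl = 3 + 4 = 7, under the cap of 15.
 */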

/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
{
	u32 field;
	int l1;
	int besld = 0;
	int hirdm = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	/* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
	l1 = udev->l1_params.timeout / 256;

	/* device has preferred BESLD */
	if (field & USB_BESL_DEEP_VALID) {
		besld = USB_GET_BESL_DEEP(field);
		hirdm = 1;
	}

	return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
}
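
/*
 * Worked example (illustrative): an L1 timeout of 512 us gives
 * l1 = 512 / 256 = 2, and a device advertising a deep BESL of 5 yields
 * PORT_BESLD(5) | PORT_L1_TIMEOUT(2) | PORT_HIRDM(1).
 */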

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
			struct usb_device *udev, int enable)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	__le32 __iomem **port_array;
	__le32 __iomem *pm_addr, *hlpm_addr;
	u32 pm_val, hlpm_val, field;
	unsigned int port_num;
	unsigned long flags;
	int hird, exit_latency;
	int ret;

	if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
			!udev->lpm_capable)
		return -EPERM;

	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EPERM;

	if (udev->usb2_hw_lpm_capable != 1)
		return -EPERM;

	spin_lock_irqsave(&xhci->lock, flags);

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + PORTPMSC;
	pm_val = readl(pm_addr);
	hlpm_addr = port_array[port_num] + PORTHLPMC;
	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num + 1);

	if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
		/* Host supports BESL timeout instead of HIRD */
		if (udev->usb2_hw_lpm_besl_capable) {
			/* if device doesn't have a preferred BESL value use a
			 * default one which works with mixed HIRD and BESL
			 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
			 */
			if ((field & USB_BESL_SUPPORT) &&
			    (field & USB_BESL_BASELINE_VALID))
				hird = USB_GET_BESL_BASELINE(field);
			else
				hird = udev->l1_params.besl;

			exit_latency = xhci_besl_encoding[hird];
			spin_unlock_irqrestore(&xhci->lock, flags);

			/* The USB 3.0 code dedicates one
			 * xhci->lpm_command->in_ctx input context for link
			 * power management evaluate context commands.  It is
			 * protected by the hcd->bandwidth_mutex and shared by
			 * all devices.  We need to set the max exit latency in
			 * USB 2 BESL LPM as well, so use the same mutex and
			 * xhci_change_max_exit_latency().
			 */
			mutex_lock(hcd->bandwidth_mutex);
			ret = xhci_change_max_exit_latency(xhci, udev,
							   exit_latency);
			mutex_unlock(hcd->bandwidth_mutex);

			if (ret < 0)
				return ret;
			spin_lock_irqsave(&xhci->lock, flags);

			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
			writel(hlpm_val, hlpm_addr);
			/* flush write */
			readl(hlpm_addr);
		} else {
			hird = xhci_calculate_hird_besl(xhci, udev);
		}

		pm_val &= ~PORT_HIRD_MASK;
		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
		writel(pm_val, pm_addr);
		pm_val = readl(pm_addr);
		pm_val |= PORT_HLE;
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
	} else {
		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
		writel(pm_val, pm_addr);
		/* flush write */
		readl(pm_addr);
		if (udev->usb2_hw_lpm_besl_capable) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			mutex_lock(hcd->bandwidth_mutex);
			xhci_change_max_exit_latency(xhci, udev, 0);
			mutex_unlock(hcd->bandwidth_mutex);
			return 0;
		}
	}

	spin_unlock_irqrestore(&xhci->lock, flags);
	return 0;
}

/* Check if a USB2 port supports a given extended capability protocol.
 * Only USB2 ports' extended protocol capability values are cached.
 * Return 1 if the capability is supported.
 */
static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
					   unsigned capability)
{
	u32 port_offset, port_count;
	int i;

	for (i = 0; i < xhci->num_ext_caps; i++) {
		if (xhci->ext_caps[i] & capability) {
			/* port offsets start at 1 */
			port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
			port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
			if (port >= port_offset &&
			    port < port_offset + port_count)
				return 1;
		}
	}
	return 0;
}
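
/*
 * Illustrative example: a cached extended capability advertising a port
 * offset of 1 and a port count of 4 covers zero-based ports 0..3, so a
 * query for port 2 returns 1 while a query for port 4 returns 0.
 */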

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int portnum = udev->portnum - 1;

	if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return 0;

	/* So far we only support LPM for non-hub devices connected
	 * directly to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return 0;

	if (xhci->hw_lpm_support == 1 &&
			xhci_check_usb2_port_capability(
				xhci, portnum, XHCI_HLC)) {
		udev->usb2_hw_lpm_capable = 1;
		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
		udev->l1_params.besl = XHCI_DEFAULT_BESL;
		if (xhci_check_usb2_port_capability(xhci, portnum,
					XHCI_BLC))
			udev->usb2_hw_lpm_besl_capable = 1;
	}

	return 0;
}

/*---------------------- USB 3.0 Link PM functions ------------------------*/

/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
static unsigned long long xhci_service_interval_to_ns(
		struct usb_endpoint_descriptor *desc)
{
	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
}
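
/*
 * Worked example (illustrative): bInterval = 4 gives a service interval of
 * 2^3 * 125 us = 1 ms, i.e. 1000000 ns.
 */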

static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
		enum usb3_link_state state)
{
	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;
	char *state_name;

	switch (state) {
	case USB3_LPM_U1:
		/* Convert SEL and PEL stored in nanoseconds to microseconds */
		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
		state_name = "U1";
		break;
	case USB3_LPM_U2:
		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
		state_name = "U2";
		break;
	default:
		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
				__func__);
		return USB3_LPM_DISABLED;
	}

	if (sel <= max_sel_pel && pel <= max_sel_pel)
		return USB3_LPM_DEVICE_INITIATED;

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long SEL %llu us\n",
				state_name, sel);
	else
		dev_dbg(&udev->dev, "Device-initiated %s disabled "
				"due to long PEL %llu us\n",
				state_name, pel);
	return USB3_LPM_DISABLED;
}

/* The U1 timeout should be the maximum of the following values:
 *  - For control endpoints, U1 system exit latency (SEL) * 3
 *  - For bulk endpoints, U1 SEL * 5
 *  - For interrupt endpoints:
 *    - Notification EPs, U1 SEL * 3
 *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
 *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
 */
static unsigned long long xhci_calculate_intel_u1_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	int ep_type;
	int intr_type;

	ep_type = usb_endpoint_type(desc);
	switch (ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		timeout_ns = udev->u1_params.sel * 3;
		break;
	case USB_ENDPOINT_XFER_BULK:
		timeout_ns = udev->u1_params.sel * 5;
		break;
	case USB_ENDPOINT_XFER_INT:
		intr_type = usb_endpoint_interrupt_type(desc);
		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
			timeout_ns = udev->u1_params.sel * 3;
			break;
		}
		/* Otherwise the calculation is the same as isoc eps */
		/* fall through */
	case USB_ENDPOINT_XFER_ISOC:
		timeout_ns = xhci_service_interval_to_ns(desc);
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;
		break;
	default:
		return 0;
	}

	return timeout_ns;
}
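
/*
 * Worked example (illustrative): a bulk endpoint on a device with
 * u1_params.sel = 20000 ns gets timeout_ns = 5 * 20000 = 100000 ns, which
 * the caller below encodes as 100 us, within USB3_LPM_U1_MAX_TIMEOUT.
 */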

/* Returns the hub-encoded U1 timeout value. */
static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	if (xhci->quirks & XHCI_INTEL_HOST)
		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
	else
		timeout_ns = udev->u1_params.sel;

	/* The U1 timeout is encoded in 1us intervals.
	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
	 */
	if (timeout_ns == USB3_LPM_DISABLED)
		timeout_ns = 1;
	else
		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);

	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U1.
	 */
	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
			"due to long timeout %llu us\n", timeout_ns);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}

/* The U2 timeout should be the maximum of:
 *  - 10 ms (to avoid the bandwidth impact on the scheduler)
 *  - largest bInterval of any active periodic endpoint (to avoid going
 *    into lower power link states between intervals).
 *  - the U2 Exit Latency of the device
 */
static unsigned long long xhci_calculate_intel_u2_timeout(
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

	return timeout_ns;
}

/* Returns the hub-encoded U2 timeout value. */
static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc)
{
	unsigned long long timeout_ns;

	if (xhci->quirks & XHCI_INTEL_HOST)
		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
	else
		timeout_ns = udev->u2_params.sel;

	/* The U2 timeout is encoded in 256us intervals */
	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
	/* If the necessary timeout value is bigger than what we can set in the
	 * USB 3.0 hub, we have to disable hub-initiated U2.
	 */
	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
		return timeout_ns;
	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
			"due to long timeout %llu us\n", timeout_ns * 256);
	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
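
/*
 * Worked example (illustrative): the Intel path's 10 ms floor encodes as
 * DIV_ROUND_UP(10000000, 256000) = 40 units of 256 us.
 */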

static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	if (state == USB3_LPM_U1)
		return xhci_calculate_u1_timeout(xhci, udev, desc);
	else if (state == USB3_LPM_U2)
		return xhci_calculate_u2_timeout(xhci, udev, desc);

	return USB3_LPM_DISABLED;
}

static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_endpoint_descriptor *desc,
		enum usb3_link_state state,
		u16 *timeout)
{
	u16 alt_timeout;

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
		desc, state, timeout);

	/* If we found we can't enable hub-initiated LPM, or
	 * the U1 or U2 exit latency was too high to allow
	 * device-initiated LPM as well, just stop searching.
	 */
	if (alt_timeout == USB3_LPM_DISABLED ||
			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
		*timeout = alt_timeout;
		return -E2BIG;
	}
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
	return 0;
}

static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,
		enum usb3_link_state state,
		u16 *timeout)
{
	int j;

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
			return -E2BIG;
		continue;
	}
	return 0;
}

static int xhci_check_intel_tier_policy(struct usb_device *udev,
		enum usb3_link_state state)
{
	struct usb_device *parent;
	unsigned int num_hubs;

	if (state == USB3_LPM_U2)
		return 0;

	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)
		num_hubs++;

	if (num_hubs < 2)
		return 0;

	dev_dbg(&udev->dev, "Disabling U1 link state for device"
			" below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub "
			"to decrease power consumption.\n");
	return -E2BIG;
}

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,
		enum usb3_link_state state)
{
	if (xhci->quirks & XHCI_INTEL_HOST)
		return xhci_check_intel_tier_policy(udev, state);
	else
		return 0;
}

/* Returns the U1 or U2 timeout that should be enabled.
 * If the tier check or timeout setting functions return with a non-zero exit
 * code, that means the timeout value has been finalized and we shouldn't look
 * at any more endpoints.
 */
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;
	char *state_name;
	int i;
	u16 timeout = USB3_LPM_DISABLED;

	if (state == USB3_LPM_U1)
		state_name = "U1";
	else if (state == USB3_LPM_U2)
		state_name = "U2";
	else {
		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
				state);
		return timeout;
	}

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return timeout;

	/* Gather some information about the currently installed configuration
	 * and alternate interface settings.
	 */
	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
			state, &timeout))
		return timeout;

	config = udev->actconfig;
	if (!config)
		return timeout;

	for (i = 0; i < config->desc.bNumInterfaces; i++) {
		struct usb_driver *driver;
		struct usb_interface *intf = config->interface[i];

		if (!intf)
			continue;

		/* Check if any currently bound drivers want hub-initiated LPM
		 * disabled.
		 */
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
						"at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);
			}
		}

		/* Not sure how this could happen... */
		if (!intf->cur_altsetting)
			continue;

		if (xhci_update_timeout_for_interface(xhci, udev,
					intf->cur_altsetting,
					state, &timeout))
			return timeout;
	}
	return timeout;
}

static int calculate_max_exit_latency(struct usb_device *udev,
		enum usb3_link_state state_changed,
		u16 hub_encoded_timeout)
{
	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;
	bool disabling_u1;
	bool disabling_u2;
	bool enabling_u1;
	bool enabling_u2;

	disabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);
	disabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout == USB3_LPM_DISABLED);

	enabling_u1 = (state_changed == USB3_LPM_U1 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);
	enabling_u2 = (state_changed == USB3_LPM_U2 &&
			hub_encoded_timeout != USB3_LPM_DISABLED);

	/* If U1 was already enabled and we're not disabling it,
	 * or we're going to enable U1, account for the U1 max exit latency.
	 */
	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
			enabling_u1)
		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
			enabling_u2)
		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);

	if (u1_mel_us > u2_mel_us)
		mel_us = u1_mel_us;
	else
		mel_us = u2_mel_us;
	/* xHCI host controller max exit latency field is only 16 bits wide. */
	if (mel_us > MAX_EXIT) {
		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
				"is too big.\n", mel_us);
		return -E2BIG;
	}
	return mel_us;
}
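
/*
 * Worked example (illustrative): enabling U1 with u1_params.mel = 3000 ns
 * while U2 stays enabled with u2_params.mel = 2000 ns gives
 * mel_us = max(3, 2) = 3 us, well within the 16-bit MAX_EXIT field.
 */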

/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
		struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
				struct usb_device *udev, int enable)
{
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * Refer to section 6.2.2: MTT should be 0 for a full speed hub,
	 * but it may already be set to 1 when we set up an xHCI virtual
	 * device, so clear it anyway.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
4798 think_time = tt->think_time;
4799 if (think_time != 0)
4800 think_time = (think_time / 666) - 1;
4801 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4802 slot_ctx->tt_info |=
4803 cpu_to_le32(TT_THINK_TIME(think_time));
4804 } else {
4805 xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4806 "TT think time or number of ports\n",
4807 (unsigned int) xhci->hci_version);
4808 }
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
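	/* For xHCI 0.95 hosts, passing ctx_change=true as the fourth
	 * argument issues an Evaluate Context command instead of
	 * Configure Endpoint.
	 */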
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
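	/* MFINDEX counts 125 us microframes; shifting right by 3 converts
	 * it to 1 ms frame numbers (8 microframes per frame).
	 */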
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
{
	struct xhci_hcd *xhci;
	/*
	 * TODO: Check with DWC3 clients for sysdev according to
	 * quirks
	 */
	struct device *dev = hcd->self.sysdev;
	unsigned int minor_rev;
	int retval;

	/* Accept arbitrarily long scatter-gather lists */
	hcd->self.sg_tablesize = ~0;

	/* Support building packets from discontinuous buffers */
	hcd->self.no_sg_constraint = 1;

	/* xHCI controllers don't stop the ep queue on short packets :| */
	hcd->self.no_stop_on_short = 1;

	xhci = hcd_to_xhci(hcd);

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * The USB 2.0 roothub under xHCI has an integrated TT
		 * (rate matching hub), as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/*
		 * Some 3.1 hosts return sbrn 0x30; use the xHCI supported
		 * protocol minor revision instead of sbrn.
		 */
		minor_rev = xhci->usb3_rhub.min_rev;
		if (minor_rev) {
			hcd->speed = HCD_USB31;
			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
		}
		xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
			  minor_rev,
			  minor_rev ? "Enhanced" : "");

		/* The xHCI private pointer was set in xhci_pci_probe for the
		 * second registered roothub.
		 */
		return 0;
	}

	mutex_init(&xhci->mutex);
	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
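	/* hc_capbase carries HCIVERSION in its upper 16 bits; hcc_params is
	 * briefly reused as scratch so HC_VERSION() can extract it before
	 * hcc_params is overwritten with the real HCCPARAMS register.
	 */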
	xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

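	/* Fold in any quirk bits requested via the "quirks" module parameter. */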
	xhci->quirks |= quirks;

	get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec may give a spurious
	 * success event after a short transfer. This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1 even though those xHCs don't actually
	 * support 64-bit address memory pointers. So this driver clears the
	 * AC64 bit of xhci->hcc_params, which makes xhci_gen_setup() call
	 * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) below.
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64-bits,
	 * if xHC supports 64-bit addressing */
	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
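		/* A coherent mask no wider than an already-accepted streaming
		 * mask is documented not to fail, so the return value is not
		 * checked here.
		 */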
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Called HCD init\n");

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,
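	/* HCD_MEMORY: memory-mapped registers; HCD_SHARED: one xHC backs
	 * both the USB2 and USB3 roothub HCDs.
	 */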

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
};

void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
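
/*
 * Example usage (hypothetical names, for illustration only): a glue driver
 * typically embeds its overrides in a static struct and applies them to its
 * own hc_driver copy once at init time:
 *
 *	static const struct xhci_driver_overrides my_xhci_overrides = {
 *		.extra_priv_size = sizeof(struct my_xhci_priv),
 *		.reset = my_xhci_setup,
 *	};
 *
 *	xhci_init_driver(&my_xhci_hc_driver, &my_xhci_overrides);
 */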

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
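	/* Each expected size is (number of 32-bit words) * 32 bits / 8 bits
	 * per byte; e.g. the doorbell array is 256 32-bit doorbell registers,
	 * i.e. 1024 bytes.
	 */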
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);