// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is the producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */
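
/*
 * Illustrative sketch (not used by the driver): for a producer on a ring whose
 * segments end in link TRBs, the empty/full checks described above reduce to
 * roughly:
 *
 *	empty = (ring->enqueue == ring->dequeue);
 *	full  = (next_trb_after(ring->enqueue) == ring->dequeue);
 *
 * where next_trb_after() is a hypothetical helper that follows link TRBs
 * (toggling the cycle state whenever a link TRB has its toggle bit set) until
 * it reaches a normal TRB.
 */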
54
55#include <linux/scatterlist.h>
56#include <linux/slab.h>
57#include <linux/dma-mapping.h>
58#include "xhci.h"
59#include "xhci-trace.h"
60
61static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
62 u32 field1, u32 field2,
63 u32 field3, u32 field4, bool command_must_succeed);
64
65/*
66 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
67 * address of the TRB.
68 */
69dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
70 union xhci_trb *trb)
71{
72 unsigned long segment_offset;
73
74 if (!seg || !trb || trb < seg->trbs)
75 return 0;
76 /* offset in TRBs */
77 segment_offset = trb - seg->trbs;
78 if (segment_offset >= TRBS_PER_SEGMENT)
79 return 0;
80 return seg->dma + (segment_offset * sizeof(*trb));
81}
82
83static bool trb_is_noop(union xhci_trb *trb)
84{
85 return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
86}
87
88static bool trb_is_link(union xhci_trb *trb)
89{
90 return TRB_TYPE_LINK_LE32(trb->link.control);
91}
92
93static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
94{
95 return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
96}
97
98static bool last_trb_on_ring(struct xhci_ring *ring,
99 struct xhci_segment *seg, union xhci_trb *trb)
100{
101 return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
102}
103
104static bool link_trb_toggles_cycle(union xhci_trb *trb)
105{
106 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
107}
108
109static bool last_td_in_urb(struct xhci_td *td)
110{
111 struct urb_priv *urb_priv = td->urb->hcpriv;
112
113 return urb_priv->num_tds_done == urb_priv->num_tds;
114}
115
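/*
 * An event TRB is still waiting to be handled by the driver if its cycle bit
 * matches the event ring's consumer cycle state (see "Consumer rules" above).
 */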
116static bool unhandled_event_trb(struct xhci_ring *ring)
117{
118 return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
119 ring->cycle_state);
120}
121
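/* Account one more finished TD for this URB (compared in last_td_in_urb()). */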
122static void inc_td_cnt(struct urb *urb)
123{
124 struct urb_priv *urb_priv = urb->hcpriv;
125
126 urb_priv->num_tds_done++;
127}
128
129static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
130{
131 if (trb_is_link(trb)) {
132 /* unchain chained link TRBs */
133 trb->link.control &= cpu_to_le32(~TRB_CHAIN);
134 } else {
135 trb->generic.field[0] = 0;
136 trb->generic.field[1] = 0;
137 trb->generic.field[2] = 0;
138 /* Preserve only the cycle bit of this TRB */
139 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
140 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
141 }
142}
143
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
148static void next_trb(struct xhci_hcd *xhci,
149 struct xhci_ring *ring,
150 struct xhci_segment **seg,
151 union xhci_trb **trb)
152{
153 if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
154 *seg = (*seg)->next;
155 *trb = ((*seg)->trbs);
156 } else {
157 (*trb)++;
158 }
159}
160
161/*
162 * See Cycle bit rules. SW is the consumer for the event ring only.
163 */
164void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
165{
166 unsigned int link_trb_count = 0;
167
168 /* event ring doesn't have link trbs, check for last trb */
169 if (ring->type == TYPE_EVENT) {
170 if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
171 ring->dequeue++;
172 goto out;
173 }
174 if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
175 ring->cycle_state ^= 1;
176 ring->deq_seg = ring->deq_seg->next;
177 ring->dequeue = ring->deq_seg->trbs;
178 goto out;
179 }
180
181 /* All other rings have link trbs */
182 if (!trb_is_link(ring->dequeue)) {
183 if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
184 xhci_warn(xhci, "Missing link TRB at end of segment\n");
185 else
186 ring->dequeue++;
187 }
188
189 while (trb_is_link(ring->dequeue)) {
190 ring->deq_seg = ring->deq_seg->next;
191 ring->dequeue = ring->deq_seg->trbs;
192
193 if (link_trb_count++ > ring->num_segs) {
194 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
195 break;
196 }
197 }
198out:
199 trace_xhci_inc_deq(ring);
200
201 return;
202}
203
204/*
205 * See Cycle bit rules. SW is the consumer for the event ring only.
206 *
207 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
208 * chain bit is set), then set the chain bit in all the following link TRBs.
209 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
210 * have their chain bit cleared (so that each Link TRB is a separate TD).
211 *
212 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
213 * set, but other sections talk about dealing with the chain bit set. This was
214 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
215 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
216 *
217 * @more_trbs_coming: Will you enqueue more TRBs before calling
218 * prepare_transfer()?
219 */
220static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
221 bool more_trbs_coming)
222{
223 u32 chain;
224 union xhci_trb *next;
225 unsigned int link_trb_count = 0;
226
227 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
228
229 if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
230 xhci_err(xhci, "Tried to move enqueue past ring segment\n");
231 return;
232 }
233
234 next = ++(ring->enqueue);
235
236 /* Update the dequeue pointer further if that was a link TRB */
237 while (trb_is_link(next)) {
238
239 /*
240 * If the caller doesn't plan on enqueueing more TDs before
241 * ringing the doorbell, then we don't want to give the link TRB
242 * to the hardware just yet. We'll give the link TRB back in
243 * prepare_ring() just before we enqueue the TD at the top of
244 * the ring.
245 */
246 if (!chain && !more_trbs_coming)
247 break;
248
249 /* If we're not dealing with 0.95 hardware or isoc rings on
250 * AMD 0.96 host, carry over the chain bit of the previous TRB
251 * (which may mean the chain bit is cleared).
252 */
253 if (!(ring->type == TYPE_ISOC &&
254 (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
255 !xhci_link_trb_quirk(xhci)) {
256 next->link.control &= cpu_to_le32(~TRB_CHAIN);
257 next->link.control |= cpu_to_le32(chain);
258 }
259 /* Give this link TRB to the hardware */
260 wmb();
261 next->link.control ^= cpu_to_le32(TRB_CYCLE);
262
263 /* Toggle the cycle bit after the last ring segment. */
264 if (link_trb_toggles_cycle(next))
265 ring->cycle_state ^= 1;
266
267 ring->enq_seg = ring->enq_seg->next;
268 ring->enqueue = ring->enq_seg->trbs;
269 next = ring->enqueue;
270
271 if (link_trb_count++ > ring->num_segs) {
272 xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
273 break;
274 }
275 }
276
277 trace_xhci_inc_enq(ring);
278}
279
280/*
281 * Return number of free normal TRBs from enqueue to dequeue pointer on ring.
282 * Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment.
283 * Only for transfer and command rings where driver is the producer, not for
284 * event rings.
285 */
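/*
 * Illustrative example (assuming TRBS_PER_SEGMENT is 256): an empty
 * two-segment ring reports 2 * 255 = 510 free TRBs, since the link TRB at the
 * end of each segment is never counted as usable space.
 */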
286static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
287{
288 struct xhci_segment *enq_seg = ring->enq_seg;
289 union xhci_trb *enq = ring->enqueue;
290 union xhci_trb *last_on_seg;
291 unsigned int free = 0;
292 int i = 0;
293
294 /* Ring might be empty even if enq != deq if enq is left on a link trb */
295 if (trb_is_link(enq)) {
296 enq_seg = enq_seg->next;
297 enq = enq_seg->trbs;
298 }
299
300 /* Empty ring, common case, don't walk the segments */
301 if (enq == ring->dequeue)
302 return ring->num_segs * (TRBS_PER_SEGMENT - 1);
303
304 do {
305 if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
306 return free + (ring->dequeue - enq);
307 last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
308 free += last_on_seg - enq;
309 enq_seg = enq_seg->next;
310 enq = enq_seg->trbs;
311 } while (i++ <= ring->num_segs);
312
313 return free;
314}
315
316/*
317 * Check to see if there's room to enqueue num_trbs on the ring and make sure
318 * enqueue pointer will not advance into dequeue segment. See rules above.
319 * return number of new segments needed to ensure this.
320 */
321
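/*
 * Illustrative example (assuming TRBS_PER_SEGMENT is 256): with the enqueue
 * pointer at index 250 of its segment, queueing 10 TRBs gives
 * trbs_past_seg = 250 + 10 - 255 = 5, so new_segs = 1 + 5 / 255 = 1; expansion
 * is then only reported if walking one segment forward from the enqueue
 * segment lands on the dequeue segment.
 */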
322static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
323 unsigned int num_trbs)
324{
325 struct xhci_segment *seg;
326 int trbs_past_seg;
327 int enq_used;
328 int new_segs;
329
330 enq_used = ring->enqueue - ring->enq_seg->trbs;
331
332 /* how many trbs will be queued past the enqueue segment? */
333 trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
334
335 /*
336 * Consider expanding the ring already if num_trbs fills the current
337 * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
338 * the next segment. Avoids confusing full ring with special empty ring
339 * case below
340 */
341 if (trbs_past_seg < 0)
342 return 0;
343
344 /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
345 if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
346 return 0;
347
348 new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
349 seg = ring->enq_seg;
350
351 while (new_segs > 0) {
352 seg = seg->next;
353 if (seg == ring->deq_seg) {
354 xhci_dbg(xhci, "Ring expansion by %d segments needed\n",
355 new_segs);
356 xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n",
357 num_trbs, trbs_past_seg % TRBS_PER_SEGMENT);
358 return new_segs;
359 }
360 new_segs--;
361 }
362
363 return 0;
364}
365
366/* Ring the host controller doorbell after placing a command on the ring */
367void xhci_ring_cmd_db(struct xhci_hcd *xhci)
368{
369 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
370 return;
371
372 xhci_dbg(xhci, "// Ding dong!\n");
373
374 trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
375
376 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
377 /* Flush PCI posted writes */
378 readl(&xhci->dba->doorbell[0]);
379}
380
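/* (Re)arm the command timeout work for the command currently being executed. */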
381static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
382{
383 return mod_delayed_work(system_wq, &xhci->cmd_timer,
384 msecs_to_jiffies(xhci->current_cmd->timeout_ms));
385}
386
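/* Return the first command on the command list, or NULL if the list is empty. */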
387static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
388{
389 return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
390 cmd_list);
391}
392
393/*
394 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
395 * If there are other commands waiting then restart the ring and kick the timer.
396 * This must be called with command ring stopped and xhci->lock held.
397 */
398static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
399 struct xhci_command *cur_cmd)
400{
401 struct xhci_command *i_cmd;
402
403 /* Turn all aborted commands in list to no-ops, then restart */
404 list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
405
406 if (i_cmd->status != COMP_COMMAND_ABORTED)
407 continue;
408
409 i_cmd->status = COMP_COMMAND_RING_STOPPED;
410
411 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
412 i_cmd->command_trb);
413
414 trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
415
416 /*
417 * caller waiting for completion is called when command
418 * completion event is received for these no-op commands
419 */
420 }
421
422 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
423
424 /* ring command ring doorbell to restart the command ring */
425 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
426 !(xhci->xhc_state & XHCI_STATE_DYING)) {
427 xhci->current_cmd = cur_cmd;
428 xhci_mod_cmd_timer(xhci);
429 xhci_ring_cmd_db(xhci);
430 }
431}
432
/* Must be called with xhci->lock held; releases and re-acquires the lock */
434static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
435{
436 struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
437 union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
438 u64 crcr;
439 int ret;
440
441 xhci_dbg(xhci, "Abort command ring\n");
442
443 reinit_completion(&xhci->cmd_ring_stop_completion);
444
445 /*
446 * The control bits like command stop, abort are located in lower
447 * dword of the command ring control register.
448 * Some controllers require all 64 bits to be written to abort the ring.
449 * Make sure the upper dword is valid, pointing to the next command,
450 * avoiding corrupting the command ring pointer in case the command ring
451 * is stopped by the time the upper dword is written.
452 */
453 next_trb(xhci, NULL, &new_seg, &new_deq);
454 if (trb_is_link(new_deq))
455 next_trb(xhci, NULL, &new_seg, &new_deq);
456
457 crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
458 xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
459
460 /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
461 * completion of the Command Abort operation. If CRR is not negated in 5
462 * seconds then driver handles it as if host died (-ENODEV).
463 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
464 * and try to recover a -ETIMEDOUT with a host controller reset.
465 */
466 ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
467 CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
468 XHCI_STATE_REMOVING);
469 if (ret < 0) {
470 xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
471 xhci_halt(xhci);
472 xhci_hc_died(xhci);
473 return ret;
474 }
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (an arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
481 spin_unlock_irqrestore(&xhci->lock, flags);
482 ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
483 msecs_to_jiffies(2000));
484 spin_lock_irqsave(&xhci->lock, flags);
485 if (!ret) {
486 xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
487 xhci_cleanup_command_queue(xhci);
488 } else {
489 xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
490 }
491 return 0;
492}
493
494void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
495 unsigned int slot_id,
496 unsigned int ep_index,
497 unsigned int stream_id)
498{
499 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
500 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
501 unsigned int ep_state = ep->ep_state;
502
503 /* Don't ring the doorbell for this endpoint if there are pending
504 * cancellations because we don't want to interrupt processing.
505 * We don't want to restart any stream rings if there's a set dequeue
506 * pointer command pending because the device can choose to start any
507 * stream once the endpoint is on the HW schedule.
508 */
509 if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
510 (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
511 return;
512
513 trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
514
515 writel(DB_VALUE(ep_index, stream_id), db_addr);
516 /* flush the write */
517 readl(db_addr);
518}
519
520/* Ring the doorbell for any rings with pending URBs */
521static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
522 unsigned int slot_id,
523 unsigned int ep_index)
524{
525 unsigned int stream_id;
526 struct xhci_virt_ep *ep;
527
528 ep = &xhci->devs[slot_id]->eps[ep_index];
529
530 /* A ring has pending URBs if its TD list is not empty */
531 if (!(ep->ep_state & EP_HAS_STREAMS)) {
532 if (ep->ring && !(list_empty(&ep->ring->td_list)))
533 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
534 return;
535 }
536
537 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
538 stream_id++) {
539 struct xhci_stream_info *stream_info = ep->stream_info;
540 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
541 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
542 stream_id);
543 }
544}
545
546void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
547 unsigned int slot_id,
548 unsigned int ep_index)
549{
550 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
551}
552
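/*
 * Validate slot_id and ep_index and return the corresponding virtual endpoint,
 * or NULL if the slot is not enabled or the indices are out of range.
 */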
553static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
554 unsigned int slot_id,
555 unsigned int ep_index)
556{
557 if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
558 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
559 return NULL;
560 }
561 if (ep_index >= EP_CTX_PER_DEV) {
562 xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
563 return NULL;
564 }
565 if (!xhci->devs[slot_id]) {
566 xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
567 return NULL;
568 }
569
570 return &xhci->devs[slot_id]->eps[ep_index];
571}
572
573static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
574 struct xhci_virt_ep *ep,
575 unsigned int stream_id)
576{
577 /* common case, no streams */
578 if (!(ep->ep_state & EP_HAS_STREAMS))
579 return ep->ring;
580
581 if (!ep->stream_info)
582 return NULL;
583
584 if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
585 xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
586 stream_id, ep->vdev->slot_id, ep->ep_index);
587 return NULL;
588 }
589
590 return ep->stream_info->stream_rings[stream_id];
591}
592
593/* Get the right ring for the given slot_id, ep_index and stream_id.
594 * If the endpoint supports streams, boundary check the URB's stream ID.
595 * If the endpoint doesn't support streams, return the singular endpoint ring.
596 */
597struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
598 unsigned int slot_id, unsigned int ep_index,
599 unsigned int stream_id)
600{
601 struct xhci_virt_ep *ep;
602
603 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
604 if (!ep)
605 return NULL;
606
607 return xhci_virt_ep_to_ring(xhci, ep, stream_id);
608}
609
610
/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
617static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
618 unsigned int ep_index, unsigned int stream_id)
619{
620 struct xhci_ep_ctx *ep_ctx;
621 struct xhci_stream_ctx *st_ctx;
622 struct xhci_virt_ep *ep;
623
624 ep = &vdev->eps[ep_index];
625
626 if (ep->ep_state & EP_HAS_STREAMS) {
627 st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
628 return le64_to_cpu(st_ctx->stream_ring);
629 }
630 ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
631 return le64_to_cpu(ep_ctx->deq);
632}
633
634static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
635 unsigned int slot_id, unsigned int ep_index,
636 unsigned int stream_id, struct xhci_td *td)
637{
638 struct xhci_virt_device *dev = xhci->devs[slot_id];
639 struct xhci_virt_ep *ep = &dev->eps[ep_index];
640 struct xhci_ring *ep_ring;
641 struct xhci_command *cmd;
642 struct xhci_segment *new_seg;
643 union xhci_trb *new_deq;
644 int new_cycle;
645 dma_addr_t addr;
646 u64 hw_dequeue;
647 bool cycle_found = false;
648 bool td_last_trb_found = false;
649 u32 trb_sct = 0;
650 int ret;
651
652 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
653 ep_index, stream_id);
654 if (!ep_ring) {
655 xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
656 stream_id);
657 return -ENODEV;
658 }
	/*
	 * A cancelled TD can complete with a stall if HW cached the trb.
	 * In this case the driver can't find the td, but if the ring is empty
	 * we can move the dequeue pointer to the current enqueue position.
	 * We shouldn't hit this anymore as cached cancelled TRBs are given back
	 * after clearing the cache, but be on the safe side and keep it anyway.
	 */
666 if (!td) {
667 if (list_empty(&ep_ring->td_list)) {
668 new_seg = ep_ring->enq_seg;
669 new_deq = ep_ring->enqueue;
670 new_cycle = ep_ring->cycle_state;
671 xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
672 goto deq_found;
673 } else {
674 xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
675 return -EINVAL;
676 }
677 }
678
679 hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
680 new_seg = ep_ring->deq_seg;
681 new_deq = ep_ring->dequeue;
682 new_cycle = hw_dequeue & 0x1;
683
684 /*
685 * We want to find the pointer, segment and cycle state of the new trb
686 * (the one after current TD's last_trb). We know the cycle state at
687 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
688 * found.
689 */
690 do {
691 if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
692 == (dma_addr_t)(hw_dequeue & ~0xf)) {
693 cycle_found = true;
694 if (td_last_trb_found)
695 break;
696 }
697 if (new_deq == td->last_trb)
698 td_last_trb_found = true;
699
700 if (cycle_found && trb_is_link(new_deq) &&
701 link_trb_toggles_cycle(new_deq))
702 new_cycle ^= 0x1;
703
704 next_trb(xhci, ep_ring, &new_seg, &new_deq);
705
706 /* Search wrapped around, bail out */
707 if (new_deq == ep->ring->dequeue) {
708 xhci_err(xhci, "Error: Failed finding new dequeue state\n");
709 return -EINVAL;
710 }
711
712 } while (!cycle_found || !td_last_trb_found);
713
714deq_found:
715
716 /* Don't update the ring cycle state for the producer (us). */
717 addr = xhci_trb_virt_to_dma(new_seg, new_deq);
718 if (addr == 0) {
719 xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
720 xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
721 return -EINVAL;
722 }
723
724 if ((ep->ep_state & SET_DEQ_PENDING)) {
725 xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
726 &addr);
727 return -EBUSY;
728 }
729
730 /* This function gets called from contexts where it cannot sleep */
731 cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
732 if (!cmd) {
733 xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
734 return -ENOMEM;
735 }
736
737 if (stream_id)
738 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
739 ret = queue_command(xhci, cmd,
740 lower_32_bits(addr) | trb_sct | new_cycle,
741 upper_32_bits(addr),
742 STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
743 EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
744 if (ret < 0) {
745 xhci_free_command(xhci, cmd);
746 return ret;
747 }
748 ep->queued_deq_seg = new_seg;
749 ep->queued_deq_ptr = new_deq;
750
751 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
752 "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
753
754 /* Stop the TD queueing code from ringing the doorbell until
755 * this command completes. The HC won't set the dequeue pointer
756 * if the ring is running, and ringing the doorbell starts the
757 * ring running.
758 */
759 ep->ep_state |= SET_DEQ_PENDING;
760 xhci_ring_cmd_db(xhci);
761 return 0;
762}
763
764/* flip_cycle means flip the cycle bit of all but the first and last TRB.
765 * (The last TRB actually points to the ring enqueue pointer, which is not part
766 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
767 */
768static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
769 struct xhci_td *td, bool flip_cycle)
770{
771 struct xhci_segment *seg = td->start_seg;
772 union xhci_trb *trb = td->first_trb;
773
774 while (1) {
775 trb_to_noop(trb, TRB_TR_NOOP);
776
777 /* flip cycle if asked to */
778 if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
779 trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
780
781 if (trb == td->last_trb)
782 break;
783
784 next_trb(xhci, ep_ring, &seg, &trb);
785 }
786}
787
788/*
789 * Must be called with xhci->lock held in interrupt context,
790 * releases and re-acquires xhci->lock
791 */
792static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
793 struct xhci_td *cur_td, int status)
794{
795 struct urb *urb = cur_td->urb;
796 struct urb_priv *urb_priv = urb->hcpriv;
797 struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
798
799 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
800 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
801 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
802 if (xhci->quirks & XHCI_AMD_PLL_FIX)
803 usb_amd_quirk_pll_enable();
804 }
805 }
806 xhci_urb_free_priv(urb_priv);
807 usb_hcd_unlink_urb_from_ep(hcd, urb);
808 trace_xhci_urb_giveback(urb);
809 usb_hcd_giveback_urb(hcd, urb, status);
810}
811
812static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
813 struct xhci_ring *ring, struct xhci_td *td)
814{
815 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
816 struct xhci_segment *seg = td->bounce_seg;
817 struct urb *urb = td->urb;
818 size_t len;
819
820 if (!ring || !seg || !urb)
821 return;
822
823 if (usb_urb_dir_out(urb)) {
824 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
825 DMA_TO_DEVICE);
826 return;
827 }
828
829 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
830 DMA_FROM_DEVICE);
	/* for IN transfers we need to copy the data from bounce to sg */
832 if (urb->num_sgs) {
833 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
834 seg->bounce_len, seg->bounce_offs);
835 if (len != seg->bounce_len)
836 xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
837 len, seg->bounce_len);
838 } else {
839 memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
840 seg->bounce_len);
841 }
842 seg->bounce_len = 0;
843 seg->bounce_offs = 0;
844}
845
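/*
 * Finish off a TD: unmap its bounce buffer, sanity-check the transferred
 * length, remove it from the td and cancelled lists, and give the URB back
 * once its last TD has been handled.
 */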
846static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
847 struct xhci_ring *ep_ring, int status)
848{
849 struct urb *urb = NULL;
850
851 /* Clean up the endpoint's TD list */
852 urb = td->urb;
853
854 /* if a bounce buffer was used to align this td then unmap it */
855 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
856
857 /* Do one last check of the actual transfer length.
858 * If the host controller said we transferred more data than the buffer
859 * length, urb->actual_length will be a very big number (since it's
860 * unsigned). Play it safe and say we didn't transfer anything.
861 */
862 if (urb->actual_length > urb->transfer_buffer_length) {
863 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
864 urb->transfer_buffer_length, urb->actual_length);
865 urb->actual_length = 0;
866 status = 0;
867 }
868 /* TD might be removed from td_list if we are giving back a cancelled URB */
869 if (!list_empty(&td->td_list))
870 list_del_init(&td->td_list);
	/* Giving back a cancelled URB, or if a TD slated for cancellation completed anyway */
872 if (!list_empty(&td->cancelled_td_list))
873 list_del_init(&td->cancelled_td_list);
874
875 inc_td_cnt(urb);
876 /* Giveback the urb when all the tds are completed */
877 if (last_td_in_urb(td)) {
878 if ((urb->actual_length != urb->transfer_buffer_length &&
879 (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
880 (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
881 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
882 urb, urb->actual_length,
883 urb->transfer_buffer_length, status);
884
885 /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
886 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
887 status = 0;
888 xhci_giveback_urb_in_irq(xhci, td, status);
889 }
890
891 return 0;
892}
893
894
895/* Complete the cancelled URBs we unlinked from td_list. */
896static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
897{
898 struct xhci_ring *ring;
899 struct xhci_td *td, *tmp_td;
900
901 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
902 cancelled_td_list) {
903
904 ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
905
906 if (td->cancel_status == TD_CLEARED) {
907 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
908 __func__, td->urb);
909 xhci_td_cleanup(ep->xhci, td, ring, td->status);
910 } else {
911 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
912 __func__, td->urb, td->cancel_status);
913 }
914 if (ep->xhci->xhc_state & XHCI_STATE_DYING)
915 return;
916 }
917}
918
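/*
 * Queue a hard or soft Reset Endpoint command; the caller is responsible for
 * ringing the command doorbell.
 */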
919static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
920 unsigned int ep_index, enum xhci_ep_reset_type reset_type)
921{
922 struct xhci_command *command;
923 int ret = 0;
924
925 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
926 if (!command) {
927 ret = -ENOMEM;
928 goto done;
929 }
930
931 xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
932 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
933 ep_index, slot_id);
934
935 ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
936done:
937 if (ret)
938 xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
939 slot_id, ep_index, ret);
940 return ret;
941}
942
943static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
944 struct xhci_virt_ep *ep,
945 struct xhci_td *td,
946 enum xhci_ep_reset_type reset_type)
947{
948 unsigned int slot_id = ep->vdev->slot_id;
949 int err;
950
951 /*
952 * Avoid resetting endpoint if link is inactive. Can cause host hang.
953 * Device will be reset soon to recover the link so don't do anything
954 */
955 if (ep->vdev->flags & VDEV_PORT_ERROR)
956 return -ENODEV;
957
958 /* add td to cancelled list and let reset ep handler take care of it */
959 if (reset_type == EP_HARD_RESET) {
960 ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
961 if (td && list_empty(&td->cancelled_td_list)) {
962 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
963 td->cancel_status = TD_HALTED;
964 }
965 }
966
967 if (ep->ep_state & EP_HALTED) {
968 xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
969 ep->ep_index);
970 return 0;
971 }
972
973 err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
974 if (err)
975 return err;
976
977 ep->ep_state |= EP_HALTED;
978
979 xhci_ring_cmd_db(xhci);
980
981 return 0;
982}
983
984/*
985 * Fix up the ep ring first, so HW stops executing cancelled TDs.
986 * We have the xHCI lock, so nothing can modify this list until we drop it.
987 * We're also in the event handler, so we can't get re-interrupted if another
988 * Stop Endpoint command completes.
989 *
990 * only call this when ring is not in a running state
991 */
992
993static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
994{
995 struct xhci_hcd *xhci;
996 struct xhci_td *td = NULL;
997 struct xhci_td *tmp_td = NULL;
998 struct xhci_td *cached_td = NULL;
999 struct xhci_ring *ring;
1000 u64 hw_deq;
1001 unsigned int slot_id = ep->vdev->slot_id;
1002 int err;
1003
1004 xhci = ep->xhci;
1005
1006 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
1007 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1008 "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
1009 (unsigned long long)xhci_trb_virt_to_dma(
1010 td->start_seg, td->first_trb),
1011 td->urb->stream_id, td->urb);
1012 list_del_init(&td->td_list);
1013 ring = xhci_urb_to_transfer_ring(xhci, td->urb);
1014 if (!ring) {
1015 xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
1016 td->urb, td->urb->stream_id);
1017 continue;
1018 }
1019 /*
1020 * If a ring stopped on the TD we need to cancel then we have to
1021 * move the xHC endpoint ring dequeue pointer past this TD.
1022 * Rings halted due to STALL may show hw_deq is past the stalled
1023 * TD, but still require a set TR Deq command to flush xHC cache.
1024 */
1025 hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
1026 td->urb->stream_id);
1027 hw_deq &= ~0xf;
1028
1029 if (td->cancel_status == TD_HALTED ||
1030 trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
1031 switch (td->cancel_status) {
1032 case TD_CLEARED: /* TD is already no-op */
1033 case TD_CLEARING_CACHE: /* set TR deq command already queued */
1034 break;
1035 case TD_DIRTY: /* TD is cached, clear it */
1036 case TD_HALTED:
1037 td->cancel_status = TD_CLEARING_CACHE;
1038 if (cached_td)
1039 /* FIXME stream case, several stopped rings */
1040 xhci_dbg(xhci,
1041 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
1042 td->urb->stream_id, td->urb,
1043 cached_td->urb->stream_id, cached_td->urb);
1044 cached_td = td;
1045 break;
1046 }
1047 } else {
1048 td_to_noop(xhci, ring, td, false);
1049 td->cancel_status = TD_CLEARED;
1050 }
1051 }
1052
1053 /* If there's no need to move the dequeue pointer then we're done */
1054 if (!cached_td)
1055 return 0;
1056
1057 err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
1058 cached_td->urb->stream_id,
1059 cached_td);
1060 if (err) {
1061 /* Failed to move past cached td, just set cached TDs to no-op */
1062 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
1063 if (td->cancel_status != TD_CLEARING_CACHE)
1064 continue;
1065 xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
1066 td->urb);
1067 td_to_noop(xhci, ring, td, false);
1068 td->cancel_status = TD_CLEARED;
1069 }
1070 }
1071 return 0;
1072}
1073
1074/*
1075 * Returns the TD the endpoint ring halted on.
1076 * Only call for non-running rings without streams.
1077 */
1078static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
1079{
1080 struct xhci_td *td;
1081 u64 hw_deq;
1082
1083 if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
1084 hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
1085 hw_deq &= ~0xf;
1086 td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
1087 if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
1088 td->last_trb, hw_deq, false))
1089 return td;
1090 }
1091 return NULL;
1092}
1093
1094/*
1095 * When we get a command completion for a Stop Endpoint Command, we need to
1096 * unlink any cancelled TDs from the ring. There are two ways to do that:
1097 *
1098 * 1. If the HW was in the middle of processing the TD that needs to be
1099 * cancelled, then we must move the ring's dequeue pointer past the last TRB
1100 * in the TD with a Set Dequeue Pointer Command.
1101 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
1102 * bit cleared) so that the HW will skip over them.
1103 */
1104static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
1105 union xhci_trb *trb, u32 comp_code)
1106{
1107 unsigned int ep_index;
1108 struct xhci_virt_ep *ep;
1109 struct xhci_ep_ctx *ep_ctx;
1110 struct xhci_td *td = NULL;
1111 enum xhci_ep_reset_type reset_type;
1112 struct xhci_command *command;
1113 int err;
1114
1115 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
1116 if (!xhci->devs[slot_id])
1117 xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
1118 slot_id);
1119 return;
1120 }
1121
1122 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1123 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1124 if (!ep)
1125 return;
1126
1127 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1128
1129 trace_xhci_handle_cmd_stop_ep(ep_ctx);
1130
1131 if (comp_code == COMP_CONTEXT_STATE_ERROR) {
		/*
		 * If the stop endpoint command raced with a halting endpoint we
		 * need to reset the host side endpoint first.
		 * If the TD we halted on isn't cancelled the TD should be given
		 * back with a proper error code, and the ring dequeue moved
		 * past the TD.
		 * In the streams case we can't find hw_deq, or the TD we halted
		 * on, so do a soft reset.
		 *
		 * The proper error code is unknown here; it would be -EPIPE if
		 * the device side of the endpoint halted (aka STALL), and
		 * -EPROTO if not (transaction error).
		 * We use -EPROTO; if the device is stalled it should return a
		 * stall error on the next transfer, which then will return
		 * -EPIPE, and the device side stall is noted and cleared by the
		 * class driver.
		 */
1146 switch (GET_EP_CTX_STATE(ep_ctx)) {
1147 case EP_STATE_HALTED:
1148 xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
1149 if (ep->ep_state & EP_HAS_STREAMS) {
1150 reset_type = EP_SOFT_RESET;
1151 } else {
1152 reset_type = EP_HARD_RESET;
1153 td = find_halted_td(ep);
1154 if (td)
1155 td->status = -EPROTO;
1156 }
1157 /* reset ep, reset handler cleans up cancelled tds */
1158 err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
1159 if (err)
1160 break;
1161 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1162 return;
1163 case EP_STATE_STOPPED:
1164 /*
1165 * NEC uPD720200 sometimes sets this state and fails with
1166 * Context Error while continuing to process TRBs.
1167 * Be conservative and trust EP_CTX_STATE on other chips.
1168 */
1169 if (!(xhci->quirks & XHCI_NEC_HOST))
1170 break;
1171 fallthrough;
1172 case EP_STATE_RUNNING:
1173 /* Race, HW handled stop ep cmd before ep was running */
1174 xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
1175
1176 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1177 if (!command) {
1178 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1179 return;
1180 }
1181 xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
1182 xhci_ring_cmd_db(xhci);
1183
1184 return;
1185 default:
1186 break;
1187 }
1188 }
1189
1190 /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
1191 xhci_invalidate_cancelled_tds(ep);
1192 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1193
1194 /* Otherwise ring the doorbell(s) to restart queued transfers */
1195 xhci_giveback_invalidated_tds(ep);
1196 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1197}
1198
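/*
 * Remove every TD still queued on this ring and give its URB back with
 * -ESHUTDOWN. Used when the host controller is presumed dead.
 */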
1199static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
1200{
1201 struct xhci_td *cur_td;
1202 struct xhci_td *tmp;
1203
1204 list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
1205 list_del_init(&cur_td->td_list);
1206
1207 if (!list_empty(&cur_td->cancelled_td_list))
1208 list_del_init(&cur_td->cancelled_td_list);
1209
1210 xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
1211
1212 inc_td_cnt(cur_td->urb);
1213 if (last_td_in_urb(cur_td))
1214 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1215 }
1216}
1217
1218static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
1219 int slot_id, int ep_index)
1220{
1221 struct xhci_td *cur_td;
1222 struct xhci_td *tmp;
1223 struct xhci_virt_ep *ep;
1224 struct xhci_ring *ring;
1225
1226 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1227 if (!ep)
1228 return;
1229
1230 if ((ep->ep_state & EP_HAS_STREAMS) ||
1231 (ep->ep_state & EP_GETTING_NO_STREAMS)) {
1232 int stream_id;
1233
1234 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
1235 stream_id++) {
1236 ring = ep->stream_info->stream_rings[stream_id];
1237 if (!ring)
1238 continue;
1239
1240 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1241 "Killing URBs for slot ID %u, ep index %u, stream %u",
1242 slot_id, ep_index, stream_id);
1243 xhci_kill_ring_urbs(xhci, ring);
1244 }
1245 } else {
1246 ring = ep->ring;
1247 if (!ring)
1248 return;
1249 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1250 "Killing URBs for slot ID %u, ep index %u",
1251 slot_id, ep_index);
1252 xhci_kill_ring_urbs(xhci, ring);
1253 }
1254
1255 list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
1256 cancelled_td_list) {
1257 list_del_init(&cur_td->cancelled_td_list);
1258 inc_td_cnt(cur_td->urb);
1259
1260 if (last_td_in_urb(cur_td))
1261 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1262 }
1263}
1264
/*
 * Host controller died; register reads return 0xffffffff.
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * The lock is released and re-acquired while giving back URBs.
 */
1274void xhci_hc_died(struct xhci_hcd *xhci)
1275{
1276 int i, j;
1277
1278 if (xhci->xhc_state & XHCI_STATE_DYING)
1279 return;
1280
1281 xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
1282 xhci->xhc_state |= XHCI_STATE_DYING;
1283
1284 xhci_cleanup_command_queue(xhci);
1285
1286 /* return any pending urbs, remove may be waiting for them */
1287 for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
1288 if (!xhci->devs[i])
1289 continue;
1290 for (j = 0; j < 31; j++)
1291 xhci_kill_endpoint_urbs(xhci, i, j);
1292 }
1293
1294 /* inform usb core hc died if PCI remove isn't already handling it */
1295 if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
1296 usb_hc_died(xhci_to_hcd(xhci));
1297}
1298
1299static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1300 struct xhci_virt_device *dev,
1301 struct xhci_ring *ep_ring,
1302 unsigned int ep_index)
1303{
1304 union xhci_trb *dequeue_temp;
1305
1306 dequeue_temp = ep_ring->dequeue;
1307
1308 /* If we get two back-to-back stalls, and the first stalled transfer
1309 * ends just before a link TRB, the dequeue pointer will be left on
1310 * the link TRB by the code in the while loop. So we have to update
1311 * the dequeue pointer one segment further, or we'll jump off
1312 * the segment into la-la-land.
1313 */
1314 if (trb_is_link(ep_ring->dequeue)) {
1315 ep_ring->deq_seg = ep_ring->deq_seg->next;
1316 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1317 }
1318
1319 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1320 /* We have more usable TRBs */
1321 ep_ring->dequeue++;
1322 if (trb_is_link(ep_ring->dequeue)) {
1323 if (ep_ring->dequeue ==
1324 dev->eps[ep_index].queued_deq_ptr)
1325 break;
1326 ep_ring->deq_seg = ep_ring->deq_seg->next;
1327 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1328 }
1329 if (ep_ring->dequeue == dequeue_temp) {
1330 xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1331 break;
1332 }
1333 }
1334}
1335
1336/*
1337 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
1338 * we need to clear the set deq pending flag in the endpoint ring state, so that
1339 * the TD queueing code can ring the doorbell again. We also need to ring the
1340 * endpoint doorbell to restart the ring, but only if there aren't more
1341 * cancellations pending.
1342 */
1343static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1344 union xhci_trb *trb, u32 cmd_comp_code)
1345{
1346 unsigned int ep_index;
1347 unsigned int stream_id;
1348 struct xhci_ring *ep_ring;
1349 struct xhci_virt_ep *ep;
1350 struct xhci_ep_ctx *ep_ctx;
1351 struct xhci_slot_ctx *slot_ctx;
1352 struct xhci_td *td, *tmp_td;
1353
1354 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1355 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1356 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1357 if (!ep)
1358 return;
1359
1360 ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
1361 if (!ep_ring) {
1362 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
1363 stream_id);
1364 /* XXX: Harmless??? */
1365 goto cleanup;
1366 }
1367
1368 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1369 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
1370 trace_xhci_handle_cmd_set_deq(slot_ctx);
1371 trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
1372
1373 if (cmd_comp_code != COMP_SUCCESS) {
1374 unsigned int ep_state;
1375 unsigned int slot_state;
1376
1377 switch (cmd_comp_code) {
1378 case COMP_TRB_ERROR:
1379 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
1380 break;
1381 case COMP_CONTEXT_STATE_ERROR:
1382 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
1383 ep_state = GET_EP_CTX_STATE(ep_ctx);
1384 slot_state = le32_to_cpu(slot_ctx->dev_state);
1385 slot_state = GET_SLOT_STATE(slot_state);
1386 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1387 "Slot state = %u, EP state = %u",
1388 slot_state, ep_state);
1389 break;
1390 case COMP_SLOT_NOT_ENABLED_ERROR:
1391 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1392 slot_id);
1393 break;
1394 default:
1395 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1396 cmd_comp_code);
1397 break;
1398 }
		/* OK, what do we do now? The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct. This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
1405 } else {
1406 u64 deq;
1407 /* 4.6.10 deq ptr is written to the stream ctx for streams */
1408 if (ep->ep_state & EP_HAS_STREAMS) {
1409 struct xhci_stream_ctx *ctx =
1410 &ep->stream_info->stream_ctx_array[stream_id];
1411 deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1412 } else {
1413 deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1414 }
1415 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1416 "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1417 if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1418 ep->queued_deq_ptr) == deq) {
1419 /* Update the ring's dequeue segment and dequeue pointer
1420 * to reflect the new position.
1421 */
1422 update_ring_for_set_deq_completion(xhci, ep->vdev,
1423 ep_ring, ep_index);
1424 } else {
1425 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1426 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1427 ep->queued_deq_seg, ep->queued_deq_ptr);
1428 }
1429 }
1430 /* HW cached TDs cleared from cache, give them back */
1431 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
1432 cancelled_td_list) {
1433 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
1434 if (td->cancel_status == TD_CLEARING_CACHE) {
1435 td->cancel_status = TD_CLEARED;
1436 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
1437 __func__, td->urb);
1438 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
1439 } else {
1440 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
1441 __func__, td->urb, td->cancel_status);
1442 }
1443 }
1444cleanup:
1445 ep->ep_state &= ~SET_DEQ_PENDING;
1446 ep->queued_deq_seg = NULL;
1447 ep->queued_deq_ptr = NULL;
1448 /* Restart any rings with pending URBs */
1449 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1450}
1451
1452static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1453 union xhci_trb *trb, u32 cmd_comp_code)
1454{
1455 struct xhci_virt_ep *ep;
1456 struct xhci_ep_ctx *ep_ctx;
1457 unsigned int ep_index;
1458
1459 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1460 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1461 if (!ep)
1462 return;
1463
1464 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1465 trace_xhci_handle_cmd_reset_ep(ep_ctx);
1466
1467 /* This command will only fail if the endpoint wasn't halted,
1468 * but we don't care.
1469 */
1470 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1471 "Ignoring reset ep completion code of %u", cmd_comp_code);
1472
1473 /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
1474 xhci_invalidate_cancelled_tds(ep);
1475
1476 /* Clear our internal halted state */
1477 ep->ep_state &= ~EP_HALTED;
1478
1479 xhci_giveback_invalidated_tds(ep);
1480
1481 /* if this was a soft reset, then restart */
1482 if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
1483 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1484}
1485
1486static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1487 struct xhci_command *command, u32 cmd_comp_code)
1488{
1489 if (cmd_comp_code == COMP_SUCCESS)
1490 command->slot_id = slot_id;
1491 else
1492 command->slot_id = 0;
1493}
1494
1495static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1496{
1497 struct xhci_virt_device *virt_dev;
1498 struct xhci_slot_ctx *slot_ctx;
1499
1500 virt_dev = xhci->devs[slot_id];
1501 if (!virt_dev)
1502 return;
1503
1504 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1505 trace_xhci_handle_cmd_disable_slot(slot_ctx);
1506
1507 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1508 /* Delete default control endpoint resources */
1509 xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1510}
1511
1512static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1513 u32 cmd_comp_code)
1514{
1515 struct xhci_virt_device *virt_dev;
1516 struct xhci_input_control_ctx *ctrl_ctx;
1517 struct xhci_ep_ctx *ep_ctx;
1518 unsigned int ep_index;
1519 u32 add_flags;
1520
1521 /*
1522 * Configure endpoint commands can come from the USB core configuration
1523 * or alt setting changes, or when streams were being configured.
1524 */
1525
1526 virt_dev = xhci->devs[slot_id];
1527 if (!virt_dev)
1528 return;
1529 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1530 if (!ctrl_ctx) {
1531 xhci_warn(xhci, "Could not get input context, bad type.\n");
1532 return;
1533 }
1534
1535 add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1536
1537 /* Input ctx add_flags are the endpoint index plus one */
1538 ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1539
1540 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
1541 trace_xhci_handle_cmd_config_ep(ep_ctx);
1542
1543 return;
1544}
1545
1546static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
1547{
1548 struct xhci_virt_device *vdev;
1549 struct xhci_slot_ctx *slot_ctx;
1550
1551 vdev = xhci->devs[slot_id];
1552 if (!vdev)
1553 return;
1554 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1555 trace_xhci_handle_cmd_addr_dev(slot_ctx);
1556}
1557
1558static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
1559{
1560 struct xhci_virt_device *vdev;
1561 struct xhci_slot_ctx *slot_ctx;
1562
1563 vdev = xhci->devs[slot_id];
1564 if (!vdev) {
1565 xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
1566 slot_id);
1567 return;
1568 }
1569 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1570 trace_xhci_handle_cmd_reset_dev(slot_ctx);
1571
1572 xhci_dbg(xhci, "Completed reset device command.\n");
1573}
1574
1575static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1576 struct xhci_event_cmd *event)
1577{
1578 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1579 xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
1580 return;
1581 }
1582 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1583 "NEC firmware version %2x.%02x",
1584 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1585 NEC_FW_MINOR(le32_to_cpu(event->status)));
1586}
1587
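/*
 * Remove the command from the command list, then either complete the waiter
 * with the given status or free the command if nobody is waiting on it.
 */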
1588static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1589{
1590 list_del(&cmd->cmd_list);
1591
1592 if (cmd->completion) {
1593 cmd->status = status;
1594 complete(cmd->completion);
1595 } else {
1596 kfree(cmd);
1597 }
1598}
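/* Abort all queued commands, completing their waiters with COMP_COMMAND_ABORTED. */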
1599
1600void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1601{
1602 struct xhci_command *cur_cmd, *tmp_cmd;
1603 xhci->current_cmd = NULL;
1604 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1605 xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1606}
1607
1608void xhci_handle_command_timeout(struct work_struct *work)
1609{
1610 struct xhci_hcd *xhci;
1611 unsigned long flags;
1612 char str[XHCI_MSG_MAX];
1613 u64 hw_ring_state;
1614 u32 cmd_field3;
1615 u32 usbsts;
1616
1617 xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
1618
1619 spin_lock_irqsave(&xhci->lock, flags);
1620
1621 /*
1622 * If timeout work is pending, or current_cmd is NULL, it means we
1623 * raced with command completion. Command is handled so just return.
1624 */
1625 if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
1626 spin_unlock_irqrestore(&xhci->lock, flags);
1627 return;
1628 }
1629
1630 cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
1631 usbsts = readl(&xhci->op_regs->status);
1632 xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
1633
1634 /* Bail out and tear down xhci if a stop endpoint command failed */
1635 if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
1636 struct xhci_virt_ep *ep;
1637
1638 xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
1639
1640 ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
1641 TRB_TO_EP_INDEX(cmd_field3));
1642 if (ep)
1643 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1644
1645 xhci_halt(xhci);
1646 xhci_hc_died(xhci);
1647 goto time_out_completed;
1648 }
1649
1650 /* mark this command to be cancelled */
1651 xhci->current_cmd->status = COMP_COMMAND_ABORTED;
1652
1653 /* Make sure command ring is running before aborting it */
1654 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1655 if (hw_ring_state == ~(u64)0) {
1656 xhci_hc_died(xhci);
1657 goto time_out_completed;
1658 }
1659
1660 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1661 (hw_ring_state & CMD_RING_RUNNING)) {
1662 /* Prevent new doorbell, and start command abort */
1663 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
1664 xhci_dbg(xhci, "Command timeout\n");
1665 xhci_abort_cmd_ring(xhci, flags);
1666 goto time_out_completed;
1667 }
1668
1669 /* host removed. Bail out */
1670 if (xhci->xhc_state & XHCI_STATE_REMOVING) {
1671 xhci_dbg(xhci, "host removed, ring start fail?\n");
1672 xhci_cleanup_command_queue(xhci);
1673
1674 goto time_out_completed;
1675 }
1676
1677 /* command timeout on stopped ring, ring can't be aborted */
1678 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1679 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1680
1681time_out_completed:
1682 spin_unlock_irqrestore(&xhci->lock, flags);
1683 return;
1684}
1685
1686static void handle_cmd_completion(struct xhci_hcd *xhci,
1687 struct xhci_event_cmd *event)
1688{
1689 unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1690 u64 cmd_dma;
1691 dma_addr_t cmd_dequeue_dma;
1692 u32 cmd_comp_code;
1693 union xhci_trb *cmd_trb;
1694 struct xhci_command *cmd;
1695 u32 cmd_type;
1696
1697 if (slot_id >= MAX_HC_SLOTS) {
1698 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
1699 return;
1700 }
1701
1702 cmd_dma = le64_to_cpu(event->cmd_trb);
1703 cmd_trb = xhci->cmd_ring->dequeue;
1704
1705 trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
1706
1707 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1708 cmd_trb);
	/*
	 * Check whether the completion event is for the command we are
	 * internally tracking.
	 */
1713 if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
1714 xhci_warn(xhci,
1715 "ERROR mismatched command completion event\n");
1716 return;
1717 }
1718
1719 cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
1720
1721 cancel_delayed_work(&xhci->cmd_timer);
1722
1723 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1724
1725 /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1726 if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
1727 complete_all(&xhci->cmd_ring_stop_completion);
1728 return;
1729 }
1730
1731 if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1732 xhci_err(xhci,
1733 "Command completion event does not match command\n");
1734 return;
1735 }
1736
1737 /*
1738 * Host aborted the command ring, check if the current command was
1739 * supposed to be aborted, otherwise continue normally.
1740 * The command ring is stopped now, but the xHC will issue a Command
1741 * Ring Stopped event which will cause us to restart it.
1742 */
1743 if (cmd_comp_code == COMP_COMMAND_ABORTED) {
1744 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1745 if (cmd->status == COMP_COMMAND_ABORTED) {
1746 if (xhci->current_cmd == cmd)
1747 xhci->current_cmd = NULL;
1748 goto event_handled;
1749 }
1750 }
1751
1752 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1753 switch (cmd_type) {
1754 case TRB_ENABLE_SLOT:
1755 xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
1756 break;
1757 case TRB_DISABLE_SLOT:
1758 xhci_handle_cmd_disable_slot(xhci, slot_id);
1759 break;
1760 case TRB_CONFIG_EP:
1761 if (!cmd->completion)
1762 xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
1763 break;
1764 case TRB_EVAL_CONTEXT:
1765 break;
1766 case TRB_ADDR_DEV:
1767 xhci_handle_cmd_addr_dev(xhci, slot_id);
1768 break;
1769 case TRB_STOP_RING:
1770 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1771 le32_to_cpu(cmd_trb->generic.field[3])));
1772 if (!cmd->completion)
1773 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
1774 cmd_comp_code);
1775 break;
1776 case TRB_SET_DEQ:
1777 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1778 le32_to_cpu(cmd_trb->generic.field[3])));
1779 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1780 break;
1781 case TRB_CMD_NOOP:
1782 /* Is this an aborted command turned to NO-OP? */
1783 if (cmd->status == COMP_COMMAND_RING_STOPPED)
1784 cmd_comp_code = COMP_COMMAND_RING_STOPPED;
1785 break;
1786 case TRB_RESET_EP:
1787 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1788 le32_to_cpu(cmd_trb->generic.field[3])));
1789 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1790 break;
1791 case TRB_RESET_DEV:
1792 /* SLOT_ID field in reset device cmd completion event TRB is 0.
1793 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1794 */
1795 slot_id = TRB_TO_SLOT_ID(
1796 le32_to_cpu(cmd_trb->generic.field[3]));
1797 xhci_handle_cmd_reset_dev(xhci, slot_id);
1798 break;
1799 case TRB_NEC_GET_FW:
1800 xhci_handle_cmd_nec_get_fw(xhci, event);
1801 break;
1802 default:
1803 /* Skip over unknown commands on the event ring */
1804 xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
1805 break;
1806 }
1807
1808 /* restart timer if this wasn't the last command */
1809 if (!list_is_singular(&xhci->cmd_list)) {
1810 xhci->current_cmd = list_first_entry(&cmd->cmd_list,
1811 struct xhci_command, cmd_list);
1812 xhci_mod_cmd_timer(xhci);
1813 } else if (xhci->current_cmd == cmd) {
1814 xhci->current_cmd = NULL;
1815 }
1816
1817event_handled:
1818 xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1819
1820 inc_deq(xhci, xhci->cmd_ring);
1821}
1822
1823static void handle_vendor_event(struct xhci_hcd *xhci,
1824 union xhci_trb *event, u32 trb_type)
1825{
1826 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1827 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1828 handle_cmd_completion(xhci, &event->event_cmd);
1829}
1830
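/*
 * Handle a Device Notification event: signal a remote wakeup on the parent
 * hub port of the device identified by the slot ID in the event TRB.
 */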
1831static void handle_device_notification(struct xhci_hcd *xhci,
1832 union xhci_trb *event)
1833{
1834 u32 slot_id;
1835 struct usb_device *udev;
1836
1837 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1838 if (!xhci->devs[slot_id]) {
1839 xhci_warn(xhci, "Device Notification event for "
1840 "unused slot %u\n", slot_id);
1841 return;
1842 }
1843
1844 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1845 slot_id);
1846 udev = xhci->devs[slot_id]->udev;
1847 if (udev && udev->parent)
1848 usb_wakeup_notification(udev->parent, udev->portnum);
1849}
1850
/*
 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
 * controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by another connection
 * to a USB 2 device.
 *
 * Reset the PHY after the USB device is disconnected if the device speed
 * is less than HCD_USB3.
 * Retry the reset sequence a maximum of 4 times, checking the PLL lock
 * status each time.
 */
1863static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1864{
1865 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1866 u32 pll_lock_check;
1867 u32 retry_count = 4;
1868
1869 do {
1870 /* Assert PHY reset */
1871 writel(0x6F, hcd->regs + 0x1048);
1872 udelay(10);
1873 /* De-assert the PHY reset */
1874 writel(0x7F, hcd->regs + 0x1048);
1875 udelay(200);
1876 pll_lock_check = readl(hcd->regs + 0x1070);
1877 } while (!(pll_lock_check & 0x1) && --retry_count);
1878}
1879
1880static void handle_port_status(struct xhci_hcd *xhci,
1881 struct xhci_interrupter *ir,
1882 union xhci_trb *event)
1883{
1884 struct usb_hcd *hcd;
1885 u32 port_id;
1886 u32 portsc, cmd_reg;
1887 int max_ports;
1888 unsigned int hcd_portnum;
1889 struct xhci_bus_state *bus_state;
1890 bool bogus_port_status = false;
1891 struct xhci_port *port;
1892
1893 /* Port status change events always have a successful completion code */
1894 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
1895 xhci_warn(xhci,
1896 "WARN: xHC returned failed port status event\n");
1897
1898 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1899 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1900
1901 if ((port_id <= 0) || (port_id > max_ports)) {
1902 xhci_warn(xhci, "Port change event with invalid port ID %d\n",
1903 port_id);
1904 return;
1905 }
1906
1907 port = &xhci->hw_ports[port_id - 1];
1908 if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
1909 xhci_warn(xhci, "Port change event, no port for port ID %u\n",
1910 port_id);
1911 bogus_port_status = true;
1912 goto cleanup;
1913 }
1914
1915 /* We might get interrupts after shared_hcd is removed */
1916 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
1917 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
1918 bogus_port_status = true;
1919 goto cleanup;
1920 }
1921
1922 hcd = port->rhub->hcd;
1923 bus_state = &port->rhub->bus_state;
1924 hcd_portnum = port->hcd_portnum;
1925 portsc = readl(port->addr);
1926
1927 xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
1928 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
1929
1930 trace_xhci_handle_port_status(port, portsc);
1931
1932 if (hcd->state == HC_STATE_SUSPENDED) {
1933 xhci_dbg(xhci, "resume root hub\n");
1934 usb_hcd_resume_root_hub(hcd);
1935 }
1936
1937 if (hcd->speed >= HCD_USB3 &&
1938 (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
1939 if (port->slot_id && xhci->devs[port->slot_id])
1940 xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
1941 }
1942
1943 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
1944 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1945
1946 cmd_reg = readl(&xhci->op_regs->command);
1947 if (!(cmd_reg & CMD_RUN)) {
1948 xhci_warn(xhci, "xHC is not running.\n");
1949 goto cleanup;
1950 }
1951
1952 if (DEV_SUPERSPEED_ANY(portsc)) {
1953 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1954 /* Set a flag to say the port signaled remote wakeup,
1955 * so we can tell the difference between the end of
1956 * device and host initiated resume.
1957 */
1958 bus_state->port_remote_wakeup |= 1 << hcd_portnum;
1959 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1960 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1961 xhci_set_link_state(xhci, port, XDEV_U0);
1962 /* Need to wait until the next link state change
1963 * indicates the device is actually in U0.
1964 */
1965 bogus_port_status = true;
1966 goto cleanup;
1967 } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
1968 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1969 port->resume_timestamp = jiffies +
1970 msecs_to_jiffies(USB_RESUME_TIMEOUT);
1971 set_bit(hcd_portnum, &bus_state->resuming_ports);
			/* Do the rest in GetPortStatus after the resume time
			 * delay. Avoid polling roothub status before that so
			 * that USB device auto-resume latency stays around
			 * ~40ms.
			 */
1976 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1977 mod_timer(&hcd->rh_timer,
1978 port->resume_timestamp);
1979 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1980 bogus_port_status = true;
1981 }
1982 }
1983
1984 if ((portsc & PORT_PLC) &&
1985 DEV_SUPERSPEED_ANY(portsc) &&
1986 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
1987 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
1988 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1989 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1990 complete(&port->u3exit_done);
1991 /* We've just brought the device into U0/1/2 through either the
1992 * Resume state after a device remote wakeup, or through the
1993 * U3Exit state after a host-initiated resume. If it's a device
1994 * initiated remote wake, don't pass up the link state change,
1995 * so the roothub behavior is consistent with external
1996 * USB 3.0 hub behavior.
1997 */
1998 if (port->slot_id && xhci->devs[port->slot_id])
1999 xhci_ring_device(xhci, port->slot_id);
2000 if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
2001 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2002 usb_wakeup_notification(hcd->self.root_hub,
2003 hcd_portnum + 1);
2004 bogus_port_status = true;
2005 goto cleanup;
2006 }
2007 }
2008
2009 /*
2010 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
2011 * RExit to a disconnect state). If so, let the driver know it's
2012 * out of the RExit state.
2013 */
2014 if (hcd->speed < HCD_USB3 && port->rexit_active) {
2015 complete(&port->rexit_done);
2016 port->rexit_active = false;
2017 bogus_port_status = true;
2018 goto cleanup;
2019 }
2020
2021 if (hcd->speed < HCD_USB3) {
2022 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2023 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
2024 (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
2025 xhci_cavium_reset_phy_quirk(xhci);
2026 }
2027
2028cleanup:
2029
2030 /* Don't make the USB core poll the roothub if we got a bad port status
2031 * change event. Besides, at that point we can't tell which roothub
2032 * (USB 2.0 or USB 3.0) to kick.
2033 */
2034 if (bogus_port_status)
2035 return;
2036
2037 /*
2038 * xHCI port-status-change events occur when the "or" of all the
2039 * status-change bits in the portsc register changes from 0 to 1.
2040 * New status changes won't cause an event if any other change
2041 * bits are still set. When an event occurs, switch over to
2042 * polling to avoid losing status changes.
2043 */
2044 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
2045 __func__, hcd->self.busnum);
2046 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2047 spin_unlock(&xhci->lock);
2048 /* Pass this up to the core */
2049 usb_hcd_poll_rh_status(hcd);
2050 spin_lock(&xhci->lock);
2051}
2052
2053/*
2054 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
2055 * at end_trb, which may be in another segment. If the suspect DMA address is a
2056 * TRB in this TD, this function returns that TRB's segment. Otherwise it
2057 * returns 0.
2058 */
2059struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
2060 struct xhci_segment *start_seg,
2061 union xhci_trb *start_trb,
2062 union xhci_trb *end_trb,
2063 dma_addr_t suspect_dma,
2064 bool debug)
2065{
2066 dma_addr_t start_dma;
2067 dma_addr_t end_seg_dma;
2068 dma_addr_t end_trb_dma;
2069 struct xhci_segment *cur_seg;
2070
2071 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
2072 cur_seg = start_seg;
2073
2074 do {
2075 if (start_dma == 0)
2076 return NULL;
2077 /* We may get an event for a Link TRB in the middle of a TD */
2078 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2079 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
2080 /* If the end TRB isn't in this segment, this is set to 0 */
2081 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
2082
2083 if (debug)
2084 xhci_warn(xhci,
2085 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
2086 (unsigned long long)suspect_dma,
2087 (unsigned long long)start_dma,
2088 (unsigned long long)end_trb_dma,
2089 (unsigned long long)cur_seg->dma,
2090 (unsigned long long)end_seg_dma);
2091
2092 if (end_trb_dma > 0) {
2093 /* The end TRB is in this segment, so suspect should be here */
2094 if (start_dma <= end_trb_dma) {
2095 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
2096 return cur_seg;
2097 } else {
2098 /* Case for one segment with
2099 * a TD wrapped around to the top
2100 */
2101 if ((suspect_dma >= start_dma &&
2102 suspect_dma <= end_seg_dma) ||
2103 (suspect_dma >= cur_seg->dma &&
2104 suspect_dma <= end_trb_dma))
2105 return cur_seg;
2106 }
2107 return NULL;
2108 } else {
2109 /* Might still be somewhere in this segment */
2110 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
2111 return cur_seg;
2112 }
2113 cur_seg = cur_seg->next;
2114 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2115 } while (cur_seg != start_seg);
2116
2117 return NULL;
2118}
2119
2120static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
2121 struct xhci_virt_ep *ep)
2122{
2123 /*
2124 * As part of low/full-speed endpoint-halt processing
2125 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
2126 */
2127 if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
2128 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
2129 !(ep->ep_state & EP_CLEARING_TT)) {
2130 ep->ep_state |= EP_CLEARING_TT;
2131 td->urb->ep->hcpriv = td->urb->dev;
2132 if (usb_hub_clear_tt_buffer(td->urb))
2133 ep->ep_state &= ~EP_CLEARING_TT;
2134 }
2135}
2136
2137/* Check if an error has halted the endpoint ring. The class driver will
 * clean up the halt for a non-default control endpoint if we indicate a stall.
 * However, babble and other errors also halt the endpoint ring, and the class
2140 * driver won't clear the halt in that case, so we need to issue a Set Transfer
2141 * Ring Dequeue Pointer command manually.
2142 */
2143static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
2144 struct xhci_ep_ctx *ep_ctx,
2145 unsigned int trb_comp_code)
2146{
2147 /* TRB completion codes that may require a manual halt cleanup */
2148 if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
2149 trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
2150 trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
2151 /* The 0.95 spec says a babbling control endpoint
2152 * is not halted. The 0.96 spec says it is. Some HW
2153 * claims to be 0.95 compliant, but it halts the control
2154 * endpoint anyway. Check if a babble halted the
2155 * endpoint.
2156 */
2157 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
2158 return 1;
2159
2160 return 0;
2161}
2162
2163int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
2164{
2165 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
2166 /* Vendor defined "informational" completion code,
2167 * treat as not-an-error.
2168 */
2169 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
2170 trb_comp_code);
2171 xhci_dbg(xhci, "Treating code as success.\n");
2172 return 1;
2173 }
2174 return 0;
2175}
2176
2177static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2178 struct xhci_ring *ep_ring, struct xhci_td *td,
2179 u32 trb_comp_code)
2180{
2181 struct xhci_ep_ctx *ep_ctx;
2182
2183 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2184
2185 switch (trb_comp_code) {
2186 case COMP_STOPPED_LENGTH_INVALID:
2187 case COMP_STOPPED_SHORT_PACKET:
2188 case COMP_STOPPED:
2189 /*
2190 * The "Stop Endpoint" completion will take care of any
2191 * stopped TDs. A stopped TD may be restarted, so don't update
2192 * the ring dequeue pointer or take this TD off any lists yet.
2193 */
2194 return 0;
2195 case COMP_USB_TRANSACTION_ERROR:
2196 case COMP_BABBLE_DETECTED_ERROR:
2197 case COMP_SPLIT_TRANSACTION_ERROR:
2198 /*
		 * If the endpoint context state is not halted we might be
		 * racing with a reset endpoint command issued by an unsuccessful
		 * stop endpoint completion (context error). In that case the
		 * TD should be on the cancelled list, and the EP_HALTED flag set.
		 *
		 * Alternatively, it may not be halted because the 0.95 spec
		 * states that a babbling control endpoint should not halt. The
		 * 0.96 spec says it should.  Some HW claims to be 0.95
		 * compliant, but halts the control endpoint anyway.
2208 */
2209 if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
2210 /*
2211 * If EP_HALTED is set and TD is on the cancelled list
2212 * the TD and dequeue pointer will be handled by reset
2213 * ep command completion
2214 */
2215 if ((ep->ep_state & EP_HALTED) &&
2216 !list_empty(&td->cancelled_td_list)) {
2217 xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
2218 (unsigned long long)xhci_trb_virt_to_dma(
2219 td->start_seg, td->first_trb));
2220 return 0;
2221 }
2222 /* endpoint not halted, don't reset it */
2223 break;
2224 }
2225 /* Almost same procedure as for STALL_ERROR below */
2226 xhci_clear_hub_tt_buffer(xhci, td, ep);
2227 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2228 return 0;
2229 case COMP_STALL_ERROR:
2230 /*
2231 * xhci internal endpoint state will go to a "halt" state for
2232 * any stall, including default control pipe protocol stall.
2233 * To clear the host side halt we need to issue a reset endpoint
2234 * command, followed by a set dequeue command to move past the
2235 * TD.
2236 * Class drivers clear the device side halt from a functional
2237 * stall later. Hub TT buffer should only be cleared for FS/LS
2238 * devices behind HS hubs for functional stalls.
2239 */
2240 if (ep->ep_index != 0)
2241 xhci_clear_hub_tt_buffer(xhci, td, ep);
2242
2243 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2244
2245 return 0; /* xhci_handle_halted_endpoint marked td cancelled */
2246 default:
2247 break;
2248 }
2249
2250 /* Update ring dequeue pointer */
2251 ep_ring->dequeue = td->last_trb;
2252 ep_ring->deq_seg = td->last_trb_seg;
2253 inc_deq(xhci, ep_ring);
2254
2255 return xhci_td_cleanup(xhci, td, ep_ring, td->status);
2256}
2257
2258/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
2259static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
2260 union xhci_trb *stop_trb)
2261{
2262 u32 sum;
2263 union xhci_trb *trb = ring->dequeue;
2264 struct xhci_segment *seg = ring->deq_seg;
2265
2266 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2267 if (!trb_is_noop(trb) && !trb_is_link(trb))
2268 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
2269 }
2270 return sum;
2271}
2272
2273/*
2274 * Process control tds, update urb status and actual_length.
2275 */
2276static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2277 struct xhci_ring *ep_ring, struct xhci_td *td,
2278 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2279{
2280 struct xhci_ep_ctx *ep_ctx;
2281 u32 trb_comp_code;
2282 u32 remaining, requested;
2283 u32 trb_type;
2284
2285 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
2286 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2287 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2288 requested = td->urb->transfer_buffer_length;
2289 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2290
2291 switch (trb_comp_code) {
2292 case COMP_SUCCESS:
2293 if (trb_type != TRB_STATUS) {
2294 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
2295 (trb_type == TRB_DATA) ? "data" : "setup");
2296 td->status = -ESHUTDOWN;
2297 break;
2298 }
2299 td->status = 0;
2300 break;
2301 case COMP_SHORT_PACKET:
2302 td->status = 0;
2303 break;
2304 case COMP_STOPPED_SHORT_PACKET:
2305 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2306 td->urb->actual_length = remaining;
2307 else
2308 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
2309 goto finish_td;
2310 case COMP_STOPPED:
2311 switch (trb_type) {
2312 case TRB_SETUP:
2313 td->urb->actual_length = 0;
2314 goto finish_td;
2315 case TRB_DATA:
2316 case TRB_NORMAL:
2317 td->urb->actual_length = requested - remaining;
2318 goto finish_td;
2319 case TRB_STATUS:
2320 td->urb->actual_length = requested;
2321 goto finish_td;
2322 default:
2323 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
2324 trb_type);
2325 goto finish_td;
2326 }
2327 case COMP_STOPPED_LENGTH_INVALID:
2328 goto finish_td;
2329 default:
2330 if (!xhci_requires_manual_halt_cleanup(xhci,
2331 ep_ctx, trb_comp_code))
2332 break;
2333 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
2334 trb_comp_code, ep->ep_index);
2335 fallthrough;
2336 case COMP_STALL_ERROR:
2337 /* Did we transfer part of the data (middle) phase? */
2338 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2339 td->urb->actual_length = requested - remaining;
2340 else if (!td->urb_length_set)
2341 td->urb->actual_length = 0;
2342 goto finish_td;
2343 }
2344
2345 /* stopped at setup stage, no data transferred */
2346 if (trb_type == TRB_SETUP)
2347 goto finish_td;
2348
2349 /*
	 * If on the data stage, update the actual_length of the URB and flag it
	 * as set, so it won't be overwritten in the event for the last TRB.
2352 */
2353 if (trb_type == TRB_DATA ||
2354 trb_type == TRB_NORMAL) {
2355 td->urb_length_set = true;
2356 td->urb->actual_length = requested - remaining;
2357 xhci_dbg(xhci, "Waiting for status stage event\n");
2358 return 0;
2359 }
2360
2361 /* at status stage */
2362 if (!td->urb_length_set)
2363 td->urb->actual_length = requested;
2364
2365finish_td:
2366 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2367}
2368
2369/*
2370 * Process isochronous tds, update urb packet status and actual_length.
2371 */
2372static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2373 struct xhci_ring *ep_ring, struct xhci_td *td,
2374 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2375{
2376 struct urb_priv *urb_priv;
2377 int idx;
2378 struct usb_iso_packet_descriptor *frame;
2379 u32 trb_comp_code;
2380 bool sum_trbs_for_length = false;
2381 u32 remaining, requested, ep_trb_len;
2382 int short_framestatus;
2383
2384 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2385 urb_priv = td->urb->hcpriv;
2386 idx = urb_priv->num_tds_done;
2387 frame = &td->urb->iso_frame_desc[idx];
2388 requested = frame->length;
2389 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2390 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2391 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2392 -EREMOTEIO : 0;
2393
2394 /* handle completion code */
2395 switch (trb_comp_code) {
2396 case COMP_SUCCESS:
2397 /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
2398 if (td->error_mid_td)
2399 break;
2400 if (remaining) {
2401 frame->status = short_framestatus;
2402 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2403 sum_trbs_for_length = true;
2404 break;
2405 }
2406 frame->status = 0;
2407 break;
2408 case COMP_SHORT_PACKET:
2409 frame->status = short_framestatus;
2410 sum_trbs_for_length = true;
2411 break;
2412 case COMP_BANDWIDTH_OVERRUN_ERROR:
2413 frame->status = -ECOMM;
2414 break;
2415 case COMP_BABBLE_DETECTED_ERROR:
2416 sum_trbs_for_length = true;
2417 fallthrough;
2418 case COMP_ISOCH_BUFFER_OVERRUN:
2419 frame->status = -EOVERFLOW;
2420 if (ep_trb != td->last_trb)
2421 td->error_mid_td = true;
2422 break;
2423 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2424 case COMP_STALL_ERROR:
2425 frame->status = -EPROTO;
2426 break;
2427 case COMP_USB_TRANSACTION_ERROR:
2428 frame->status = -EPROTO;
2429 sum_trbs_for_length = true;
2430 if (ep_trb != td->last_trb)
2431 td->error_mid_td = true;
2432 break;
2433 case COMP_STOPPED:
2434 sum_trbs_for_length = true;
2435 break;
2436 case COMP_STOPPED_SHORT_PACKET:
		/* field normally containing residue now contains transferred */
2438 frame->status = short_framestatus;
2439 requested = remaining;
2440 break;
2441 case COMP_STOPPED_LENGTH_INVALID:
2442 requested = 0;
2443 remaining = 0;
2444 break;
2445 default:
2446 sum_trbs_for_length = true;
2447 frame->status = -1;
2448 break;
2449 }
2450
2451 if (td->urb_length_set)
2452 goto finish_td;
2453
2454 if (sum_trbs_for_length)
2455 frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
2456 ep_trb_len - remaining;
2457 else
2458 frame->actual_length = requested;
2459
2460 td->urb->actual_length += frame->actual_length;
2461
2462finish_td:
2463 /* Don't give back TD yet if we encountered an error mid TD */
2464 if (td->error_mid_td && ep_trb != td->last_trb) {
2465 xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
2466 td->urb_length_set = true;
2467 return 0;
2468 }
2469
2470 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2471}
2472
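/*
 * Give back an isoc TD that was skipped (e.g. after a missed service
 * interval): mark the frame -EXDEV with zero actual length and move the ring
 * dequeue pointer past the TD.
 */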
2473static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2474 struct xhci_virt_ep *ep, int status)
2475{
2476 struct urb_priv *urb_priv;
2477 struct usb_iso_packet_descriptor *frame;
2478 int idx;
2479
2480 urb_priv = td->urb->hcpriv;
2481 idx = urb_priv->num_tds_done;
2482 frame = &td->urb->iso_frame_desc[idx];
2483
2484 /* The transfer is partly done. */
2485 frame->status = -EXDEV;
2486
2487 /* calc actual length */
2488 frame->actual_length = 0;
2489
2490 /* Update ring dequeue pointer */
2491 ep->ring->dequeue = td->last_trb;
2492 ep->ring->deq_seg = td->last_trb_seg;
2493 inc_deq(xhci, ep->ring);
2494
2495 return xhci_td_cleanup(xhci, td, ep->ring, status);
2496}
2497
2498/*
2499 * Process bulk and interrupt tds, update urb status and actual_length.
2500 */
2501static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2502 struct xhci_ring *ep_ring, struct xhci_td *td,
2503 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2504{
2505 struct xhci_slot_ctx *slot_ctx;
2506 u32 trb_comp_code;
2507 u32 remaining, requested, ep_trb_len;
2508
2509 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2510 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2511 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2512 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2513 requested = td->urb->transfer_buffer_length;
2514
2515 switch (trb_comp_code) {
2516 case COMP_SUCCESS:
2517 ep->err_count = 0;
2518 /* handle success with untransferred data as short packet */
2519 if (ep_trb != td->last_trb || remaining) {
2520 xhci_warn(xhci, "WARN Successful completion on short TX\n");
2521 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2522 td->urb->ep->desc.bEndpointAddress,
2523 requested, remaining);
2524 }
2525 td->status = 0;
2526 break;
2527 case COMP_SHORT_PACKET:
2528 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2529 td->urb->ep->desc.bEndpointAddress,
2530 requested, remaining);
2531 td->status = 0;
2532 break;
2533 case COMP_STOPPED_SHORT_PACKET:
2534 td->urb->actual_length = remaining;
2535 goto finish_td;
2536 case COMP_STOPPED_LENGTH_INVALID:
2537 /* stopped on ep trb with invalid length, exclude it */
2538 ep_trb_len = 0;
2539 remaining = 0;
2540 break;
2541 case COMP_USB_TRANSACTION_ERROR:
2542 if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
2543 (ep->err_count++ > MAX_SOFT_RETRY) ||
2544 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
2545 break;
2546
2547 td->status = 0;
2548
2549 xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
2550 return 0;
2551 default:
2552 /* do nothing */
2553 break;
2554 }
2555
2556 if (ep_trb == td->last_trb)
2557 td->urb->actual_length = requested - remaining;
2558 else
2559 td->urb->actual_length =
2560 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2561 ep_trb_len - remaining;
2562finish_td:
2563 if (remaining > requested) {
2564 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2565 remaining);
2566 td->urb->actual_length = 0;
2567 }
2568
2569 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2570}
2571
2572/*
2573 * If this function returns an error condition, it means it got a Transfer
2574 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2575 * At this point, the host controller is probably hosed and should be reset.
2576 */
2577static int handle_tx_event(struct xhci_hcd *xhci,
2578 struct xhci_interrupter *ir,
2579 struct xhci_transfer_event *event)
2580{
2581 struct xhci_virt_ep *ep;
2582 struct xhci_ring *ep_ring;
2583 unsigned int slot_id;
2584 int ep_index;
2585 struct xhci_td *td = NULL;
2586 dma_addr_t ep_trb_dma;
2587 struct xhci_segment *ep_seg;
2588 union xhci_trb *ep_trb;
2589 int status = -EINPROGRESS;
2590 struct xhci_ep_ctx *ep_ctx;
2591 u32 trb_comp_code;
2592 int td_num = 0;
2593 bool handling_skipped_tds = false;
2594
2595 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2596 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2597 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2598 ep_trb_dma = le64_to_cpu(event->buffer);
2599
2600 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
2601 if (!ep) {
2602 xhci_err(xhci, "ERROR Invalid Transfer event\n");
2603 goto err_out;
2604 }
2605
2606 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
2607 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2608
2609 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
2610 xhci_err(xhci,
2611 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
2612 slot_id, ep_index);
2613 goto err_out;
2614 }
2615
2616 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */
2617 if (!ep_ring) {
2618 switch (trb_comp_code) {
2619 case COMP_STALL_ERROR:
2620 case COMP_USB_TRANSACTION_ERROR:
2621 case COMP_INVALID_STREAM_TYPE_ERROR:
2622 case COMP_INVALID_STREAM_ID_ERROR:
2623 xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
2624 ep_index);
2625 if (ep->err_count++ > MAX_SOFT_RETRY)
2626 xhci_handle_halted_endpoint(xhci, ep, NULL,
2627 EP_HARD_RESET);
2628 else
2629 xhci_handle_halted_endpoint(xhci, ep, NULL,
2630 EP_SOFT_RESET);
2631 goto cleanup;
2632 case COMP_RING_UNDERRUN:
2633 case COMP_RING_OVERRUN:
2634 case COMP_STOPPED_LENGTH_INVALID:
2635 goto cleanup;
2636 default:
2637 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
2638 slot_id, ep_index);
2639 goto err_out;
2640 }
2641 }
2642
2643 /* Count current td numbers if ep->skip is set */
2644 if (ep->skip)
2645 td_num += list_count_nodes(&ep_ring->td_list);
2646
2647 /* Look for common error cases */
2648 switch (trb_comp_code) {
2649 /* Skip codes that require special handling depending on
2650 * transfer type
2651 */
2652 case COMP_SUCCESS:
2653 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2654 break;
2655 if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
2656 ep_ring->last_td_was_short)
2657 trb_comp_code = COMP_SHORT_PACKET;
2658 else
2659 xhci_warn_ratelimited(xhci,
2660 "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
2661 slot_id, ep_index);
2662 break;
2663 case COMP_SHORT_PACKET:
2664 break;
2665 /* Completion codes for endpoint stopped state */
2666 case COMP_STOPPED:
2667 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2668 slot_id, ep_index);
2669 break;
2670 case COMP_STOPPED_LENGTH_INVALID:
2671 xhci_dbg(xhci,
2672 "Stopped on No-op or Link TRB for slot %u ep %u\n",
2673 slot_id, ep_index);
2674 break;
2675 case COMP_STOPPED_SHORT_PACKET:
2676 xhci_dbg(xhci,
2677 "Stopped with short packet transfer detected for slot %u ep %u\n",
2678 slot_id, ep_index);
2679 break;
2680 /* Completion codes for endpoint halted state */
2681 case COMP_STALL_ERROR:
2682 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2683 ep_index);
2684 status = -EPIPE;
2685 break;
2686 case COMP_SPLIT_TRANSACTION_ERROR:
2687 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
2688 slot_id, ep_index);
2689 status = -EPROTO;
2690 break;
2691 case COMP_USB_TRANSACTION_ERROR:
2692 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
2693 slot_id, ep_index);
2694 status = -EPROTO;
2695 break;
2696 case COMP_BABBLE_DETECTED_ERROR:
2697 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2698 slot_id, ep_index);
2699 status = -EOVERFLOW;
2700 break;
2701 /* Completion codes for endpoint error state */
2702 case COMP_TRB_ERROR:
2703 xhci_warn(xhci,
2704 "WARN: TRB error for slot %u ep %u on endpoint\n",
2705 slot_id, ep_index);
2706 status = -EILSEQ;
2707 break;
2708 /* completion codes not indicating endpoint state change */
2709 case COMP_DATA_BUFFER_ERROR:
2710 xhci_warn(xhci,
2711 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2712 slot_id, ep_index);
2713 status = -ENOSR;
2714 break;
2715 case COMP_BANDWIDTH_OVERRUN_ERROR:
2716 xhci_warn(xhci,
2717 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2718 slot_id, ep_index);
2719 break;
2720 case COMP_ISOCH_BUFFER_OVERRUN:
2721 xhci_warn(xhci,
2722 "WARN: buffer overrun event for slot %u ep %u on endpoint",
2723 slot_id, ep_index);
2724 break;
2725 case COMP_RING_UNDERRUN:
2726 /*
2727 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
		 * Underrun Event for an OUT Isoch endpoint.
2730 */
2731 xhci_dbg(xhci, "underrun event on endpoint\n");
2732 if (!list_empty(&ep_ring->td_list))
2733 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2734 "still with TDs queued?\n",
2735 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2736 ep_index);
2737 goto cleanup;
2738 case COMP_RING_OVERRUN:
2739 xhci_dbg(xhci, "overrun event on endpoint\n");
2740 if (!list_empty(&ep_ring->td_list))
2741 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2742 "still with TDs queued?\n",
2743 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2744 ep_index);
2745 goto cleanup;
2746 case COMP_MISSED_SERVICE_ERROR:
2747 /*
		 * When a missed service error is encountered, one or more
		 * isoc TDs may have been missed by the xHC.
		 * Set the skip flag on the ring; complete the missed TDs as
		 * short transfers the next time the ring is processed.
2752 */
2753 ep->skip = true;
2754 xhci_dbg(xhci,
2755 "Miss service interval error for slot %u ep %u, set skip flag\n",
2756 slot_id, ep_index);
2757 goto cleanup;
2758 case COMP_NO_PING_RESPONSE_ERROR:
2759 ep->skip = true;
2760 xhci_dbg(xhci,
2761 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2762 slot_id, ep_index);
2763 goto cleanup;
2764
2765 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2766 /* needs disable slot command to recover */
2767 xhci_warn(xhci,
2768 "WARN: detect an incompatible device for slot %u ep %u",
2769 slot_id, ep_index);
2770 status = -EPROTO;
2771 break;
2772 default:
2773 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2774 status = 0;
2775 break;
2776 }
2777 xhci_warn(xhci,
			  "ERROR Unknown event condition %u for slot %u ep %u, HC probably busted\n",
2779 trb_comp_code, slot_id, ep_index);
2780 goto cleanup;
2781 }
2782
2783 do {
2784 /* This TRB should be in the TD at the head of this ring's
2785 * TD list.
2786 */
2787 if (list_empty(&ep_ring->td_list)) {
2788 /*
			 * Don't print warnings if this is due to a stopped
			 * endpoint generating an extra completion event while
			 * the device was suspended, or an event for the last
			 * TRB of a short TD we already got a short event for.
			 * The short TD is already removed from the TD list.
2794 */
2795
2796 if (!(trb_comp_code == COMP_STOPPED ||
2797 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2798 ep_ring->last_td_was_short)) {
2799 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2800 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2801 ep_index);
2802 }
2803 if (ep->skip) {
2804 ep->skip = false;
2805 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2806 slot_id, ep_index);
2807 }
2808 if (trb_comp_code == COMP_STALL_ERROR ||
2809 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2810 trb_comp_code)) {
2811 xhci_handle_halted_endpoint(xhci, ep, NULL,
2812 EP_HARD_RESET);
2813 }
2814 goto cleanup;
2815 }
2816
2817 /* We've skipped all the TDs on the ep ring when ep->skip set */
2818 if (ep->skip && td_num == 0) {
2819 ep->skip = false;
2820 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2821 slot_id, ep_index);
2822 goto cleanup;
2823 }
2824
2825 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2826 td_list);
2827 if (ep->skip)
2828 td_num--;
2829
2830 /* Is this a TRB in the currently executing TD? */
2831 ep_seg = trb_in_td(xhci, td->start_seg, td->first_trb,
2832 td->last_trb, ep_trb_dma, false);
2833
2834 /*
		 * Skip the Force Stopped Event. The event_trb (event_dma) of
		 * the FSE is not in the current TD pointed to by
		 * ep_ring->dequeue because the hardware dequeue pointer is
		 * still at the previous TRB of the current TD. The previous
		 * TRB may be a Link TRB or the last TRB of the previous TD.
		 * The command completion handler will take care of the rest.
2841 */
2842 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2843 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
2844 goto cleanup;
2845 }
2846
2847 if (!ep_seg) {
2848
2849 if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2850 skip_isoc_td(xhci, td, ep, status);
2851 goto cleanup;
2852 }
2853
2854 /*
2855 * Some hosts give a spurious success event after a short
2856 * transfer. Ignore it.
2857 */
2858 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2859 ep_ring->last_td_was_short) {
2860 ep_ring->last_td_was_short = false;
2861 goto cleanup;
2862 }
2863
2864 /*
			 * xhci 4.10.2 states isoc endpoints should continue
			 * processing the next TD if there was an error mid TD.
			 * So hosts like NEC don't generate an event for the last
			 * isoc TRB even if the IOC flag is set.
			 * xhci 4.9.1 states that if there are errors in multi-TRB
			 * TDs the xHC should generate an error for that TRB, and
			 * if the xHC proceeds to the next TD it should generate
			 * an event for any TRB with the IOC flag on the way.
			 * Other hosts follow this. So this event might be for the
			 * next TD.
2874 */
2875 if (td->error_mid_td &&
2876 !list_is_last(&td->td_list, &ep_ring->td_list)) {
2877 struct xhci_td *td_next = list_next_entry(td, td_list);
2878
2879 ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb,
2880 td_next->last_trb, ep_trb_dma, false);
2881 if (ep_seg) {
2882 /* give back previous TD, start handling new */
2883 xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
2884 ep_ring->dequeue = td->last_trb;
2885 ep_ring->deq_seg = td->last_trb_seg;
2886 inc_deq(xhci, ep_ring);
2887 xhci_td_cleanup(xhci, td, ep_ring, td->status);
2888 td = td_next;
2889 }
2890 }
2891
2892 if (!ep_seg) {
2893 /* HC is busted, give up! */
2894 xhci_err(xhci,
2895 "ERROR Transfer event TRB DMA ptr not "
2896 "part of current TD ep_index %d "
2897 "comp_code %u\n", ep_index,
2898 trb_comp_code);
2899 trb_in_td(xhci, td->start_seg, td->first_trb,
2900 td->last_trb, ep_trb_dma, true);
2901 return -ESHUTDOWN;
2902 }
2903 }
2904 if (trb_comp_code == COMP_SHORT_PACKET)
2905 ep_ring->last_td_was_short = true;
2906 else
2907 ep_ring->last_td_was_short = false;
2908
2909 if (ep->skip) {
2910 xhci_dbg(xhci,
2911 "Found td. Clear skip flag for slot %u ep %u.\n",
2912 slot_id, ep_index);
2913 ep->skip = false;
2914 }
2915
2916 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2917 sizeof(*ep_trb)];
2918
2919 trace_xhci_handle_transfer(ep_ring,
2920 (struct xhci_generic_trb *) ep_trb);
2921
2922 /*
2923 * No-op TRB could trigger interrupts in a case where
2924 * a URB was killed and a STALL_ERROR happens right
2925 * after the endpoint ring stopped. Reset the halted
2926 * endpoint. Otherwise, the endpoint remains stalled
2927 * indefinitely.
2928 */
2929
2930 if (trb_is_noop(ep_trb)) {
2931 if (trb_comp_code == COMP_STALL_ERROR ||
2932 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2933 trb_comp_code))
2934 xhci_handle_halted_endpoint(xhci, ep, td,
2935 EP_HARD_RESET);
2936 goto cleanup;
2937 }
2938
2939 td->status = status;
2940
2941 /* update the urb's actual_length and give back to the core */
2942 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2943 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
2944 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2945 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
2946 else
2947 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
2948cleanup:
2949 handling_skipped_tds = ep->skip &&
2950 trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
2951 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
2952
2953 /*
	 * If ep->skip is set, it means there are missed TDs on the
	 * endpoint ring that need to be taken care of.
	 * Process them as short transfers until we reach the TD pointed
	 * to by the event.
2958 */
2959 } while (handling_skipped_tds);
2960
2961 return 0;
2962
2963err_out:
2964 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2965 (unsigned long long) xhci_trb_virt_to_dma(
2966 ir->event_ring->deq_seg,
2967 ir->event_ring->dequeue),
2968 lower_32_bits(le64_to_cpu(event->buffer)),
2969 upper_32_bits(le64_to_cpu(event->buffer)),
2970 le32_to_cpu(event->transfer_len),
2971 le32_to_cpu(event->flags));
2972 return -ENODEV;
2973}
2974
2975/*
2976 * This function handles one OS-owned event on the event ring. It may drop
2977 * xhci->lock between event processing (e.g. to pass up port status changes).
2978 */
2979static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
2980 union xhci_trb *event)
2981{
2982 u32 trb_type;
2983
2984 trace_xhci_handle_event(ir->event_ring, &event->generic);
2985
2986 /*
2987 * Barrier between reading the TRB_CYCLE (valid) flag before, and any
2988 * speculative reads of the event's flags/data below.
2989 */
2990 rmb();
2991 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
2992 /* FIXME: Handle more event types. */
2993
2994 switch (trb_type) {
2995 case TRB_COMPLETION:
2996 handle_cmd_completion(xhci, &event->event_cmd);
2997 break;
2998 case TRB_PORT_STATUS:
2999 handle_port_status(xhci, ir, event);
3000 break;
3001 case TRB_TRANSFER:
3002 handle_tx_event(xhci, ir, &event->trans_event);
3003 break;
3004 case TRB_DEV_NOTE:
3005 handle_device_notification(xhci, event);
3006 break;
3007 default:
3008 if (trb_type >= TRB_VENDOR_DEFINED_LOW)
3009 handle_vendor_event(xhci, event, trb_type);
3010 else
3011 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
3012 }
3013 /* Any of the above functions may drop and re-acquire the lock, so check
3014 * to make sure a watchdog timer didn't mark the host as non-responsive.
3015 */
3016 if (xhci->xhc_state & XHCI_STATE_DYING) {
3017 xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
3018 return -ENODEV;
3019 }
3020
3021 return 0;
3022}
3023
3024/*
3025 * Update Event Ring Dequeue Pointer:
3026 * - When all events have finished
3027 * - To avoid "Event Ring Full Error" condition
3028 */
3029static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
3030 struct xhci_interrupter *ir,
3031 bool clear_ehb)
3032{
3033 u64 temp_64;
3034 dma_addr_t deq;
3035
3036 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
3037 deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
3038 ir->event_ring->dequeue);
3039 if (deq == 0)
3040 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
3041 /*
3042 * Per 4.9.4, Software writes to the ERDP register shall always advance
3043 * the Event Ring Dequeue Pointer value.
3044 */
3045 if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
3046 return;
3047
3048 /* Update HC event ring dequeue pointer */
3049 temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
3050 temp_64 |= deq & ERST_PTR_MASK;
3051
3052 /* Clear the event handler busy flag (RW1C) */
3053 if (clear_ehb)
3054 temp_64 |= ERST_EHB;
3055 xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
3056}
3057
3058/* Clear the interrupt pending bit for a specific interrupter. */
3059static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci,
3060 struct xhci_interrupter *ir)
3061{
3062 if (!ir->ip_autoclear) {
3063 u32 irq_pending;
3064
3065 irq_pending = readl(&ir->ir_set->irq_pending);
3066 irq_pending |= IMAN_IP;
3067 writel(irq_pending, &ir->ir_set->irq_pending);
3068 }
3069}
3070
3071/*
3072 * Handle all OS-owned events on an interrupter event ring. It may drop
 * and reacquire xhci->lock between event processing.
3074 */
3075static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
3076{
3077 int event_loop = 0;
3078 int err;
3079 u64 temp;
3080
3081 xhci_clear_interrupt_pending(xhci, ir);
3082
3083 /* Event ring hasn't been allocated yet. */
3084 if (!ir->event_ring || !ir->event_ring->dequeue) {
3085 xhci_err(xhci, "ERROR interrupter event ring not ready\n");
3086 return -ENOMEM;
3087 }
3088
3089 if (xhci->xhc_state & XHCI_STATE_DYING ||
3090 xhci->xhc_state & XHCI_STATE_HALTED) {
3091 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
3092
3093 /* Clear the event handler busy flag (RW1C) */
3094 temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
3095 xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
3096 return -ENODEV;
3097 }
3098
3099 /* Process all OS owned event TRBs on this event ring */
3100 while (unhandled_event_trb(ir->event_ring)) {
3101 err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
3102
3103 /*
3104 * If half a segment of events have been handled in one go then
3105 * update ERDP, and force isoc trbs to interrupt more often
3106 */
3107 if (event_loop++ > TRBS_PER_SEGMENT / 2) {
3108 xhci_update_erst_dequeue(xhci, ir, false);
3109
3110 if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
3111 ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
3112
3113 event_loop = 0;
3114 }
3115
3116 /* Update SW event ring dequeue pointer */
3117 inc_deq(xhci, ir->event_ring);
3118
3119 if (err)
3120 break;
3121 }
3122
3123 xhci_update_erst_dequeue(xhci, ir, true);
3124
3125 return 0;
3126}
3127
3128/*
3129 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3130 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
3131 * indicators of an event TRB error, but we check the status *first* to be safe.
3132 */
3133irqreturn_t xhci_irq(struct usb_hcd *hcd)
3134{
3135 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3136 irqreturn_t ret = IRQ_HANDLED;
3137 u32 status;
3138
3139 spin_lock(&xhci->lock);
3140 /* Check if the xHC generated the interrupt, or the irq is shared */
3141 status = readl(&xhci->op_regs->status);
3142 if (status == ~(u32)0) {
3143 xhci_hc_died(xhci);
3144 goto out;
3145 }
3146
3147 if (!(status & STS_EINT)) {
3148 ret = IRQ_NONE;
3149 goto out;
3150 }
3151
3152 if (status & STS_HCE) {
3153 xhci_warn(xhci, "WARNING: Host Controller Error\n");
3154 goto out;
3155 }
3156
3157 if (status & STS_FATAL) {
3158 xhci_warn(xhci, "WARNING: Host System Error\n");
3159 xhci_halt(xhci);
3160 goto out;
3161 }
3162
3163 /*
3164 * Clear the op reg interrupt status first,
3165 * so we can receive interrupts from other MSI-X interrupters.
3166 * Write 1 to clear the interrupt status.
3167 */
3168 status |= STS_EINT;
3169 writel(status, &xhci->op_regs->status);
3170
3171 /* This is the handler of the primary interrupter */
3172 xhci_handle_events(xhci, xhci->interrupters[0]);
3173out:
3174 spin_unlock(&xhci->lock);
3175
3176 return ret;
3177}
3178
3179irqreturn_t xhci_msi_irq(int irq, void *hcd)
3180{
3181 return xhci_irq(hcd);
3182}
3183EXPORT_SYMBOL_GPL(xhci_msi_irq);
3184
3185/**** Endpoint Ring Operations ****/
3186
3187/*
3188 * Generic function for queueing a TRB on a ring.
3189 * The caller must have checked to make sure there's room on the ring.
3190 *
3191 * @more_trbs_coming: Will you enqueue more TRBs before calling
3192 * prepare_transfer()?
3193 */
3194static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3195 bool more_trbs_coming,
3196 u32 field1, u32 field2, u32 field3, u32 field4)
3197{
3198 struct xhci_generic_trb *trb;
3199
3200 trb = &ring->enqueue->generic;
3201 trb->field[0] = cpu_to_le32(field1);
3202 trb->field[1] = cpu_to_le32(field2);
3203 trb->field[2] = cpu_to_le32(field3);
3204 /* make sure TRB is fully written before giving it to the controller */
3205 wmb();
3206 trb->field[3] = cpu_to_le32(field4);
3207
3208 trace_xhci_queue_trb(ring, trb);
3209
3210 inc_enq(xhci, ring, more_trbs_coming);
3211}
3212
3213/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * Expands the ring if it starts to become full.
3216 */
3217static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3218 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
3219{
3220 unsigned int link_trb_count = 0;
3221 unsigned int new_segs = 0;
3222
3223 /* Make sure the endpoint has been added to xHC schedule */
3224 switch (ep_state) {
3225 case EP_STATE_DISABLED:
3226 /*
3227 * USB core changed config/interfaces without notifying us,
3228 * or hardware is reporting the wrong state.
3229 */
3230 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
3231 return -ENOENT;
3232 case EP_STATE_ERROR:
3233 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
3234 /* FIXME event handling code for error needs to clear it */
3235 /* XXX not sure if this should be -ENOENT or not */
3236 return -EINVAL;
3237 case EP_STATE_HALTED:
3238 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
3239 break;
3240 case EP_STATE_STOPPED:
3241 case EP_STATE_RUNNING:
3242 break;
3243 default:
3244 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
3245 /*
3246 * FIXME issue Configure Endpoint command to try to get the HC
3247 * back into a known state.
3248 */
3249 return -EINVAL;
3250 }
3251
3252 if (ep_ring != xhci->cmd_ring) {
3253 new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
3254 } else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
		xhci_err(xhci, "Expanding the command ring is not supported\n");
3256 return -ENOMEM;
3257 }
3258
3259 if (new_segs) {
3260 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
3261 "ERROR no room on ep ring, try ring expansion");
3262 if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
3263 xhci_err(xhci, "Ring expansion failed\n");
3264 return -ENOMEM;
3265 }
3266 }
3267
3268 while (trb_is_link(ep_ring->enqueue)) {
3269 /* If we're not dealing with 0.95 hardware or isoc rings
3270 * on AMD 0.96 host, clear the chain bit.
3271 */
3272 if (!xhci_link_trb_quirk(xhci) &&
3273 !(ep_ring->type == TYPE_ISOC &&
3274 (xhci->quirks & XHCI_AMD_0x96_HOST)))
3275 ep_ring->enqueue->link.control &=
3276 cpu_to_le32(~TRB_CHAIN);
3277 else
3278 ep_ring->enqueue->link.control |=
3279 cpu_to_le32(TRB_CHAIN);
3280
3281 wmb();
3282 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
3283
3284 /* Toggle the cycle bit after the last ring segment. */
3285 if (link_trb_toggles_cycle(ep_ring->enqueue))
3286 ep_ring->cycle_state ^= 1;
3287
3288 ep_ring->enq_seg = ep_ring->enq_seg->next;
3289 ep_ring->enqueue = ep_ring->enq_seg->trbs;
3290
3291 /* prevent infinite loop if all first trbs are link trbs */
3292 if (link_trb_count++ > ep_ring->num_segs) {
3293 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
3294 return -EINVAL;
3295 }
3296 }
3297
3298 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
3299 xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
3300 return -EINVAL;
3301 }
3302
3303 return 0;
3304}
3305
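/*
 * Prepare the endpoint ring to receive num_trbs TRBs for one TD of the URB:
 * check/expand the ring, link the URB to the endpoint on its first TD, and
 * add the TD to the ring's TD list.
 */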
3306static int prepare_transfer(struct xhci_hcd *xhci,
3307 struct xhci_virt_device *xdev,
3308 unsigned int ep_index,
3309 unsigned int stream_id,
3310 unsigned int num_trbs,
3311 struct urb *urb,
3312 unsigned int td_index,
3313 gfp_t mem_flags)
3314{
3315 int ret;
3316 struct urb_priv *urb_priv;
3317 struct xhci_td *td;
3318 struct xhci_ring *ep_ring;
3319 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3320
3321 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3322 stream_id);
3323 if (!ep_ring) {
3324 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3325 stream_id);
3326 return -EINVAL;
3327 }
3328
3329 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3330 num_trbs, mem_flags);
3331 if (ret)
3332 return ret;
3333
3334 urb_priv = urb->hcpriv;
3335 td = &urb_priv->td[td_index];
3336
3337 INIT_LIST_HEAD(&td->td_list);
3338 INIT_LIST_HEAD(&td->cancelled_td_list);
3339
3340 if (td_index == 0) {
3341 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3342 if (unlikely(ret))
3343 return ret;
3344 }
3345
3346 td->urb = urb;
3347 /* Add this TD to the tail of the endpoint ring's TD list */
3348 list_add_tail(&td->td_list, &ep_ring->td_list);
3349 td->start_seg = ep_ring->enq_seg;
3350 td->first_trb = ep_ring->enqueue;
3351
3352 return 0;
3353}
3354
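/*
 * Number of TRBs needed to describe 'len' bytes starting at 'addr', given
 * that a single TRB buffer must not cross a TRB_MAX_BUFF_SIZE boundary.
 * A zero-length buffer still needs one TRB.
 */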
3355unsigned int count_trbs(u64 addr, u64 len)
3356{
3357 unsigned int num_trbs;
3358
3359 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3360 TRB_MAX_BUFF_SIZE);
3361 if (num_trbs == 0)
3362 num_trbs++;
3363
3364 return num_trbs;
3365}
3366
3367static inline unsigned int count_trbs_needed(struct urb *urb)
3368{
3369 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3370}
3371
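/* Sum the TRBs needed for each mapped scatterlist entry of the URB */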
3372static unsigned int count_sg_trbs_needed(struct urb *urb)
3373{
3374 struct scatterlist *sg;
3375 unsigned int i, len, full_len, num_trbs = 0;
3376
3377 full_len = urb->transfer_buffer_length;
3378
3379 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3380 len = sg_dma_len(sg);
3381 num_trbs += count_trbs(sg_dma_address(sg), len);
3382 len = min_t(unsigned int, len, full_len);
3383 full_len -= len;
3384 if (full_len == 0)
3385 break;
3386 }
3387
3388 return num_trbs;
3389}
3390
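/* TRBs needed for the i-th isochronous frame of the URB */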
3391static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3392{
3393 u64 addr, len;
3394
3395 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3396 len = urb->iso_frame_desc[i].length;
3397
3398 return count_trbs(addr, len);
3399}
3400
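/* Sanity check: queued TRB lengths must add up to the URB transfer length */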
3401static void check_trb_math(struct urb *urb, int running_total)
3402{
3403 if (unlikely(running_total != urb->transfer_buffer_length))
3404 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3405 "queued %#x (%d), asked for %#x (%d)\n",
3406 __func__,
3407 urb->ep->desc.bEndpointAddress,
3408 running_total, running_total,
3409 urb->transfer_buffer_length,
3410 urb->transfer_buffer_length);
3411}
3412
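/*
 * Hand the queued TDs over to the hardware: flip the cycle bit of the first
 * TRB last, so the xHC never sees a partially written TD, then ring the
 * endpoint doorbell.
 */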
3413static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3414 unsigned int ep_index, unsigned int stream_id, int start_cycle,
3415 struct xhci_generic_trb *start_trb)
3416{
3417 /*
3418 * Pass all the TRBs to the hardware at once and make sure this write
3419 * isn't reordered.
3420 */
3421 wmb();
3422 if (start_cycle)
3423 start_trb->field[3] |= cpu_to_le32(start_cycle);
3424 else
3425 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3426 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3427}
3428
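/*
 * Compare the polling interval in the endpoint context with the interval
 * requested in the URB (converted to microframes for LS/FS devices) and fall
 * back to the xHC's value if they differ.
 */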
3429static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3430 struct xhci_ep_ctx *ep_ctx)
3431{
3432 int xhci_interval;
3433 int ep_interval;
3434
3435 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3436 ep_interval = urb->interval;
3437
3438 /* Convert to microframes */
3439 if (urb->dev->speed == USB_SPEED_LOW ||
3440 urb->dev->speed == USB_SPEED_FULL)
3441 ep_interval *= 8;
3442
3443 /* FIXME change this to a warning and a suggestion to use the new API
3444 * to set the polling interval (once the API is added).
3445 */
3446 if (xhci_interval != ep_interval) {
3447 dev_dbg_ratelimited(&urb->dev->dev,
3448 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3449 ep_interval, ep_interval == 1 ? "" : "s",
3450 xhci_interval, xhci_interval == 1 ? "" : "s");
3451 urb->interval = xhci_interval;
3452 /* Convert back to frames for LS/FS devices */
3453 if (urb->dev->speed == USB_SPEED_LOW ||
3454 urb->dev->speed == USB_SPEED_FULL)
3455 urb->interval /= 8;
3456 }
3457}
3458
3459/*
3460 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3461 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3462 * (comprised of sg list entries) can take several service intervals to
3463 * transmit.
3464 */
3465int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3466 struct urb *urb, int slot_id, unsigned int ep_index)
3467{
3468 struct xhci_ep_ctx *ep_ctx;
3469
3470 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3471 check_interval(xhci, urb, ep_ctx);
3472
3473 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3474}
3475
3476/*
3477 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3478 * packets remaining in the TD (*not* including this TRB).
3479 *
3480 * Total TD packet count = total_packet_count =
3481 * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
3482 *
3483 * Packets transferred up to and including this TRB = packets_transferred =
3484 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3485 *
3486 * TD size = total_packet_count - packets_transferred
3487 *
3488 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3489 * including this TRB, right shifted by 10
3490 *
3491 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3492 * This is taken care of in the TRB_TD_SIZE() macro
3493 *
3494 * The last TRB in a TD must have the TD size set to zero.
3495 */
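/*
 * Worked example (illustrative, not from the original source) for a 1.0+
 * host: a 3000 byte TD on an endpoint with wMaxPacketSize of 512 gives
 * total_packet_count = DIV_ROUND_UP(3000, 512) = 6. If the first TRB
 * carries 1024 bytes, packets_transferred = (0 + 1024) / 512 = 2, so the
 * TD size written into that TRB is 6 - 2 = 4 packets remaining.
 */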
3496static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3497 int trb_buff_len, unsigned int td_total_len,
3498 struct urb *urb, bool more_trbs_coming)
3499{
3500 u32 maxp, total_packet_count;
3501
3502 /* MTK xHCI 0.96 contains some features from 1.0 */
3503 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3504 return ((td_total_len - transferred) >> 10);
3505
3506 /* One TRB with a zero-length data packet. */
3507 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3508 trb_buff_len == td_total_len)
3509 return 0;
3510
3511	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3512 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3513 trb_buff_len = 0;
3514
3515 maxp = usb_endpoint_maxp(&urb->ep->desc);
3516 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3517
3518 /* Queueing functions don't count the current TRB into transferred */
3519 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3520}
3521
3522
3523static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3524 u32 *trb_buff_len, struct xhci_segment *seg)
3525{
3526 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
3527 unsigned int unalign;
3528 unsigned int max_pkt;
3529 u32 new_buff_len;
3530 size_t len;
3531
3532 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
3533 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3534
3535 /* we got lucky, last normal TRB data on segment is packet aligned */
3536 if (unalign == 0)
3537 return 0;
3538
3539 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3540 unalign, *trb_buff_len);
3541
3542	/* is the last normal TRB alignable by splitting it? */
3543 if (*trb_buff_len > unalign) {
3544 *trb_buff_len -= unalign;
3545 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3546 return 0;
3547 }
3548
3549 /*
3550	 * We want enqd_len + trb_buff_len to sum up to a number that is
3551	 * divisible by the endpoint's wMaxPacketSize. IOW:
3552 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3553 */
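	/*
	 * Illustrative example (assumption, not from the original source):
	 * with max_pkt == 512 and enqd_len == 700, new_buff_len below becomes
	 * 512 - (700 % 512) = 324, so that 700 + 324 == 1024 is a multiple of
	 * wMaxPacketSize.
	 */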
3554 new_buff_len = max_pkt - (enqd_len % max_pkt);
3555
3556 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3557 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3558
3559	/* create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */
3560 if (usb_urb_dir_out(urb)) {
3561 if (urb->num_sgs) {
3562 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
3563 seg->bounce_buf, new_buff_len, enqd_len);
3564 if (len != new_buff_len)
3565 xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
3566 len, new_buff_len);
3567 } else {
3568 memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
3569 }
3570
3571 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3572 max_pkt, DMA_TO_DEVICE);
3573 } else {
3574 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3575 max_pkt, DMA_FROM_DEVICE);
3576 }
3577
3578 if (dma_mapping_error(dev, seg->bounce_dma)) {
3579 /* try without aligning. Some host controllers survive */
3580 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3581 return 0;
3582 }
3583 *trb_buff_len = new_buff_len;
3584 seg->bounce_len = new_buff_len;
3585 seg->bounce_offs = enqd_len;
3586
3587 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3588
3589 return 1;
3590}
3591
3592/* This is very similar to what ehci-q.c qtd_fill() does */
3593int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3594 struct urb *urb, int slot_id, unsigned int ep_index)
3595{
3596 struct xhci_ring *ring;
3597 struct urb_priv *urb_priv;
3598 struct xhci_td *td;
3599 struct xhci_generic_trb *start_trb;
3600 struct scatterlist *sg = NULL;
3601 bool more_trbs_coming = true;
3602 bool need_zero_pkt = false;
3603 bool first_trb = true;
3604 unsigned int num_trbs;
3605 unsigned int start_cycle, num_sgs = 0;
3606 unsigned int enqd_len, block_len, trb_buff_len, full_len;
3607 int sent_len, ret;
3608 u32 field, length_field, remainder;
3609 u64 addr, send_addr;
3610
3611 ring = xhci_urb_to_transfer_ring(xhci, urb);
3612 if (!ring)
3613 return -EINVAL;
3614
3615 full_len = urb->transfer_buffer_length;
3616	/* If we have a scatter/gather list, use it. */
3617 if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
3618 num_sgs = urb->num_mapped_sgs;
3619 sg = urb->sg;
3620 addr = (u64) sg_dma_address(sg);
3621 block_len = sg_dma_len(sg);
3622 num_trbs = count_sg_trbs_needed(urb);
3623 } else {
3624 num_trbs = count_trbs_needed(urb);
3625 addr = (u64) urb->transfer_dma;
3626 block_len = full_len;
3627 }
3628 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3629 ep_index, urb->stream_id,
3630 num_trbs, urb, 0, mem_flags);
3631 if (unlikely(ret < 0))
3632 return ret;
3633
3634 urb_priv = urb->hcpriv;
3635
3636 /* Deal with URB_ZERO_PACKET - need one more td/trb */
3637 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
3638 need_zero_pkt = true;
3639
3640 td = &urb_priv->td[0];
3641
3642 /*
3643 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3644 * until we've finished creating all the other TRBs. The ring's cycle
3645 * state may change as we enqueue the other TRBs, so save it too.
3646 */
3647 start_trb = &ring->enqueue->generic;
3648 start_cycle = ring->cycle_state;
3649 send_addr = addr;
3650
3651 /* Queue the TRBs, even if they are zero-length */
3652 for (enqd_len = 0; first_trb || enqd_len < full_len;
3653 enqd_len += trb_buff_len) {
3654 field = TRB_TYPE(TRB_NORMAL);
3655
3656 /* TRB buffer should not cross 64KB boundaries */
3657 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3658 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
3659
3660 if (enqd_len + trb_buff_len > full_len)
3661 trb_buff_len = full_len - enqd_len;
3662
3663 /* Don't change the cycle bit of the first TRB until later */
3664 if (first_trb) {
3665 first_trb = false;
3666 if (start_cycle == 0)
3667 field |= TRB_CYCLE;
3668 } else
3669 field |= ring->cycle_state;
3670
3671 /* Chain all the TRBs together; clear the chain bit in the last
3672 * TRB to indicate it's the last TRB in the chain.
3673 */
3674 if (enqd_len + trb_buff_len < full_len) {
3675 field |= TRB_CHAIN;
3676 if (trb_is_link(ring->enqueue + 1)) {
3677 if (xhci_align_td(xhci, urb, enqd_len,
3678 &trb_buff_len,
3679 ring->enq_seg)) {
3680 send_addr = ring->enq_seg->bounce_dma;
3681 /* assuming TD won't span 2 segs */
3682 td->bounce_seg = ring->enq_seg;
3683 }
3684 }
3685 }
3686 if (enqd_len + trb_buff_len >= full_len) {
3687 field &= ~TRB_CHAIN;
3688 field |= TRB_IOC;
3689 more_trbs_coming = false;
3690 td->last_trb = ring->enqueue;
3691 td->last_trb_seg = ring->enq_seg;
3692 if (xhci_urb_suitable_for_idt(urb)) {
3693 memcpy(&send_addr, urb->transfer_buffer,
3694 trb_buff_len);
3695 le64_to_cpus(&send_addr);
3696 field |= TRB_IDT;
3697 }
3698 }
3699
3700 /* Only set interrupt on short packet for IN endpoints */
3701 if (usb_urb_dir_in(urb))
3702 field |= TRB_ISP;
3703
3704 /* Set the TRB length, TD size, and interrupter fields. */
3705 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3706 full_len, urb, more_trbs_coming);
3707
3708 length_field = TRB_LEN(trb_buff_len) |
3709 TRB_TD_SIZE(remainder) |
3710 TRB_INTR_TARGET(0);
3711
3712 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
3713 lower_32_bits(send_addr),
3714 upper_32_bits(send_addr),
3715 length_field,
3716 field);
3717 td->num_trbs++;
3718 addr += trb_buff_len;
3719 sent_len = trb_buff_len;
3720
3721 while (sg && sent_len >= block_len) {
3722 /* New sg entry */
3723 --num_sgs;
3724 sent_len -= block_len;
3725 sg = sg_next(sg);
3726 if (num_sgs != 0 && sg) {
3727 block_len = sg_dma_len(sg);
3728 addr = (u64) sg_dma_address(sg);
3729 addr += sent_len;
3730 }
3731 }
3732 block_len -= sent_len;
3733 send_addr = addr;
3734 }
3735
3736 if (need_zero_pkt) {
3737 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3738 ep_index, urb->stream_id,
3739 1, urb, 1, mem_flags);
3740 urb_priv->td[1].last_trb = ring->enqueue;
3741 urb_priv->td[1].last_trb_seg = ring->enq_seg;
3742 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3743 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3744 urb_priv->td[1].num_trbs++;
3745 }
3746
3747 check_trb_math(urb, enqd_len);
3748 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3749 start_cycle, start_trb);
3750 return 0;
3751}
3752
3753/* Caller must have locked xhci->lock */
3754int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3755 struct urb *urb, int slot_id, unsigned int ep_index)
3756{
3757 struct xhci_ring *ep_ring;
3758 int num_trbs;
3759 int ret;
3760 struct usb_ctrlrequest *setup;
3761 struct xhci_generic_trb *start_trb;
3762 int start_cycle;
3763 u32 field;
3764 struct urb_priv *urb_priv;
3765 struct xhci_td *td;
3766
3767 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3768 if (!ep_ring)
3769 return -EINVAL;
3770
3771 /*
3772 * Need to copy setup packet into setup TRB, so we can't use the setup
3773 * DMA address.
3774 */
3775 if (!urb->setup_packet)
3776 return -EINVAL;
3777
3778 /* 1 TRB for setup, 1 for status */
3779 num_trbs = 2;
3780 /*
3781 * Don't need to check if we need additional event data and normal TRBs,
3782 * since data in control transfers will never get bigger than 16MB
3783 * XXX: can we get a buffer that crosses 64KB boundaries?
3784 */
3785 if (urb->transfer_buffer_length > 0)
3786 num_trbs++;
3787 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3788 ep_index, urb->stream_id,
3789 num_trbs, urb, 0, mem_flags);
3790 if (ret < 0)
3791 return ret;
3792
3793 urb_priv = urb->hcpriv;
3794 td = &urb_priv->td[0];
3795 td->num_trbs = num_trbs;
3796
3797 /*
3798 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3799 * until we've finished creating all the other TRBs. The ring's cycle
3800 * state may change as we enqueue the other TRBs, so save it too.
3801 */
3802 start_trb = &ep_ring->enqueue->generic;
3803 start_cycle = ep_ring->cycle_state;
3804
3805 /* Queue setup TRB - see section 6.4.1.2.1 */
3806 /* FIXME better way to translate setup_packet into two u32 fields? */
3807 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3808 field = 0;
3809 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3810 if (start_cycle == 0)
3811 field |= 0x1;
3812
3813 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
3814 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3815 if (urb->transfer_buffer_length > 0) {
3816 if (setup->bRequestType & USB_DIR_IN)
3817 field |= TRB_TX_TYPE(TRB_DATA_IN);
3818 else
3819 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3820 }
3821 }
3822
3823 queue_trb(xhci, ep_ring, true,
3824 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3825 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3826 TRB_LEN(8) | TRB_INTR_TARGET(0),
3827 /* Immediate data in pointer */
3828 field);
3829
3830 /* If there's data, queue data TRBs */
3831 /* Only set interrupt on short packet for IN endpoints */
3832 if (usb_urb_dir_in(urb))
3833 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3834 else
3835 field = TRB_TYPE(TRB_DATA);
3836
3837 if (urb->transfer_buffer_length > 0) {
3838 u32 length_field, remainder;
3839 u64 addr;
3840
3841 if (xhci_urb_suitable_for_idt(urb)) {
3842 memcpy(&addr, urb->transfer_buffer,
3843 urb->transfer_buffer_length);
3844 le64_to_cpus(&addr);
3845 field |= TRB_IDT;
3846 } else {
3847 addr = (u64) urb->transfer_dma;
3848 }
3849
3850 remainder = xhci_td_remainder(xhci, 0,
3851 urb->transfer_buffer_length,
3852 urb->transfer_buffer_length,
3853 urb, 1);
3854 length_field = TRB_LEN(urb->transfer_buffer_length) |
3855 TRB_TD_SIZE(remainder) |
3856 TRB_INTR_TARGET(0);
3857 if (setup->bRequestType & USB_DIR_IN)
3858 field |= TRB_DIR_IN;
3859 queue_trb(xhci, ep_ring, true,
3860 lower_32_bits(addr),
3861 upper_32_bits(addr),
3862 length_field,
3863 field | ep_ring->cycle_state);
3864 }
3865
3866 /* Save the DMA address of the last TRB in the TD */
3867 td->last_trb = ep_ring->enqueue;
3868 td->last_trb_seg = ep_ring->enq_seg;
3869
3870 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3871 /* If the device sent data, the status stage is an OUT transfer */
3872 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3873 field = 0;
3874 else
3875 field = TRB_DIR_IN;
3876 queue_trb(xhci, ep_ring, false,
3877 0,
3878 0,
3879 TRB_INTR_TARGET(0),
3880 /* Event on completion */
3881 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3882
3883 giveback_first_trb(xhci, slot_id, ep_index, 0,
3884 start_cycle, start_trb);
3885 return 0;
3886}
3887
3888/*
3889 * The transfer burst count field of the isochronous TRB defines the number of
3890 * bursts that are required to move all packets in this TD. Only SuperSpeed
3891 * devices can burst up to bMaxBurst number of packets per service interval.
3892 * This field is zero based, meaning a value of zero in the field means one
3893 * burst. Basically, for everything but SuperSpeed devices, this field will be
3894 * zero. Only xHCI 1.0 host controllers support this field.
3895 */
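/*
 * Worked example (illustrative): a SuperSpeed isoc endpoint with
 * bMaxBurst = 3 (bursts of up to 4 packets) moving 10 packets needs
 * DIV_ROUND_UP(10, 4) = 3 bursts, so the zero-based burst count is 2.
 */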
3896static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3897 struct urb *urb, unsigned int total_packet_count)
3898{
3899 unsigned int max_burst;
3900
3901 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3902 return 0;
3903
3904 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3905 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3906}
3907
3908/*
3909 * Returns the number of packets in the last "burst" of packets. This field is
3910 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3911 * the last burst packet count is equal to the total number of packets in the
3912 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3913 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3914 * contain 1 to (bMaxBurst + 1) packets.
3915 */
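/*
 * Worked example (illustrative): continuing the case above, 10 packets with
 * bMaxBurst = 3 leave a residue of 10 % 4 = 2 packets in the last burst, so
 * the zero-based TLBPC value is 1. If the residue were 0, the last burst
 * would be full and TLBPC would equal bMaxBurst (3).
 */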
3916static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3917 struct urb *urb, unsigned int total_packet_count)
3918{
3919 unsigned int max_burst;
3920 unsigned int residue;
3921
3922 if (xhci->hci_version < 0x100)
3923 return 0;
3924
3925 if (urb->dev->speed >= USB_SPEED_SUPER) {
3926 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3927 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3928 residue = total_packet_count % (max_burst + 1);
3929 /* If residue is zero, the last burst contains (max_burst + 1)
3930 * number of packets, but the TLBPC field is zero-based.
3931 */
3932 if (residue == 0)
3933 return max_burst;
3934 return residue - 1;
3935 }
3936 if (total_packet_count == 0)
3937 return 0;
3938 return total_packet_count - 1;
3939}
3940
3941/*
3942 * Calculates the Frame ID field of the isochronous TRB, which identifies
3943 * the target frame that the interval associated with this Isochronous
3944 * Transfer Descriptor will start on. Refer to 4.11.2.5 in 1.1 spec.
3945 *
3946 * Returns actual frame id on success, negative value on error.
3947 */
3948static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3949 struct urb *urb, int index)
3950{
3951 int start_frame, ist, ret = 0;
3952 int start_frame_id, end_frame_id, current_frame_id;
3953
3954 if (urb->dev->speed == USB_SPEED_LOW ||
3955 urb->dev->speed == USB_SPEED_FULL)
3956 start_frame = urb->start_frame + index * urb->interval;
3957 else
3958 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3959
3960 /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3961 *
3962 * If bit [3] of IST is cleared to '0', software can add a TRB no
3963 * later than IST[2:0] Microframes before that TRB is scheduled to
3964 * be executed.
3965 * If bit [3] of IST is set to '1', software can add a TRB no later
3966 * than IST[2:0] Frames before that TRB is scheduled to be executed.
3967 */
3968 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3969 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3970 ist <<= 3;
3971
3972 /* Software shall not schedule an Isoch TD with a Frame ID value that
3973 * is less than the Start Frame ID or greater than the End Frame ID,
3974 * where:
3975 *
3976 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3977 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3978 *
3979 * Both the End Frame ID and Start Frame ID values are calculated
3980	 * in microframes. When software determines the valid Frame ID value,
3981	 * the End Frame ID value should be rounded down to the nearest Frame
3982 * boundary, and the Start Frame ID value should be rounded up to the
3983 * nearest Frame boundary.
3984 */
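	/*
	 * Illustrative example (hypothetical register value): if MFINDEX reads
	 * 0x100 microframes and IST is 2, start_frame_id below becomes
	 * roundup(0x100 + 2 + 1, 8) = 0x108 microframes, i.e. frame 0x21, and
	 * end_frame_id becomes rounddown(0x100 + 895 * 8, 8) = 0x1cf8
	 * microframes, i.e. frame 0x39f.
	 */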
3985 current_frame_id = readl(&xhci->run_regs->microframe_index);
3986 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3987 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3988
3989 start_frame &= 0x7ff;
3990 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3991 end_frame_id = (end_frame_id >> 3) & 0x7ff;
3992
3993 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3994 __func__, index, readl(&xhci->run_regs->microframe_index),
3995 start_frame_id, end_frame_id, start_frame);
3996
3997 if (start_frame_id < end_frame_id) {
3998 if (start_frame > end_frame_id ||
3999 start_frame < start_frame_id)
4000 ret = -EINVAL;
4001 } else if (start_frame_id > end_frame_id) {
4002 if ((start_frame > end_frame_id &&
4003 start_frame < start_frame_id))
4004 ret = -EINVAL;
4005 } else {
4006 ret = -EINVAL;
4007 }
4008
4009 if (index == 0) {
4010 if (ret == -EINVAL || start_frame == start_frame_id) {
4011 start_frame = start_frame_id + 1;
4012 if (urb->dev->speed == USB_SPEED_LOW ||
4013 urb->dev->speed == USB_SPEED_FULL)
4014 urb->start_frame = start_frame;
4015 else
4016 urb->start_frame = start_frame << 3;
4017 ret = 0;
4018 }
4019 }
4020
4021 if (ret) {
4022 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
4023 start_frame, current_frame_id, index,
4024 start_frame_id, end_frame_id);
4025 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
4026 return ret;
4027 }
4028
4029 return start_frame;
4030}
4031
4032/* Check if we should generate event interrupt for a TD in an isoc URB */
4033static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
4034 struct xhci_interrupter *ir)
4035{
4036 if (xhci->hci_version < 0x100)
4037 return false;
4038 /* always generate an event interrupt for the last TD */
4039 if (i == num_tds - 1)
4040 return false;
4041 /*
4042 * If AVOID_BEI is set the host handles full event rings poorly,
4043	 * If XHCI_AVOID_BEI is set, the host handles full event rings poorly, so
4044	 * generate an event at least every isoc_bei_interval TDs to clear the event ring
4045 if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
4046 return !!(i % ir->isoc_bei_interval);
4047
4048 return true;
4049}
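/*
 * Illustrative example (assumption): on a 1.0+ host with XHCI_AVOID_BEI set,
 * isoc_bei_interval == 8 and a 20 TD URB, the BEI bit is left clear (an event
 * is generated) only for TDs 8, 16 and the final TD 19; all other TDs have
 * BEI set and complete without an interrupt.
 */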
4050
4051/* This is for isoc transfer */
4052static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4053 struct urb *urb, int slot_id, unsigned int ep_index)
4054{
4055 struct xhci_interrupter *ir;
4056 struct xhci_ring *ep_ring;
4057 struct urb_priv *urb_priv;
4058 struct xhci_td *td;
4059 int num_tds, trbs_per_td;
4060 struct xhci_generic_trb *start_trb;
4061 bool first_trb;
4062 int start_cycle;
4063 u32 field, length_field;
4064 int running_total, trb_buff_len, td_len, td_remain_len, ret;
4065 u64 start_addr, addr;
4066 int i, j;
4067 bool more_trbs_coming;
4068 struct xhci_virt_ep *xep;
4069 int frame_id;
4070
4071 xep = &xhci->devs[slot_id]->eps[ep_index];
4072 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
4073 ir = xhci->interrupters[0];
4074
4075 num_tds = urb->number_of_packets;
4076 if (num_tds < 1) {
4077 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
4078 return -EINVAL;
4079 }
4080 start_addr = (u64) urb->transfer_dma;
4081 start_trb = &ep_ring->enqueue->generic;
4082 start_cycle = ep_ring->cycle_state;
4083
4084 urb_priv = urb->hcpriv;
4085 /* Queue the TRBs for each TD, even if they are zero-length */
4086 for (i = 0; i < num_tds; i++) {
4087 unsigned int total_pkt_count, max_pkt;
4088 unsigned int burst_count, last_burst_pkt_count;
4089 u32 sia_frame_id;
4090
4091 first_trb = true;
4092 running_total = 0;
4093 addr = start_addr + urb->iso_frame_desc[i].offset;
4094 td_len = urb->iso_frame_desc[i].length;
4095 td_remain_len = td_len;
4096 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
4097 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
4098
4099 /* A zero-length transfer still involves at least one packet. */
4100 if (total_pkt_count == 0)
4101 total_pkt_count++;
4102 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
4103 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
4104 urb, total_pkt_count);
4105
4106 trbs_per_td = count_isoc_trbs_needed(urb, i);
4107
4108 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
4109 urb->stream_id, trbs_per_td, urb, i, mem_flags);
4110 if (ret < 0) {
4111 if (i == 0)
4112 return ret;
4113 goto cleanup;
4114 }
4115 td = &urb_priv->td[i];
4116 td->num_trbs = trbs_per_td;
4117 /* use SIA as default, if frame id is used overwrite it */
4118 sia_frame_id = TRB_SIA;
4119 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
4120 HCC_CFC(xhci->hcc_params)) {
4121 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
4122 if (frame_id >= 0)
4123 sia_frame_id = TRB_FRAME_ID(frame_id);
4124 }
4125 /*
4126 * Set isoc specific data for the first TRB in a TD.
4127 * Prevent HW from getting the TRBs by keeping the cycle state
4128		 * inverted in the first TD's isoc TRB.
4129 */
4130 field = TRB_TYPE(TRB_ISOC) |
4131 TRB_TLBPC(last_burst_pkt_count) |
4132 sia_frame_id |
4133 (i ? ep_ring->cycle_state : !start_cycle);
4134
4135 /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
4136 if (!xep->use_extended_tbc)
4137 field |= TRB_TBC(burst_count);
4138
4139 /* fill the rest of the TRB fields, and remaining normal TRBs */
4140 for (j = 0; j < trbs_per_td; j++) {
4141 u32 remainder = 0;
4142
4143 /* only first TRB is isoc, overwrite otherwise */
4144 if (!first_trb)
4145 field = TRB_TYPE(TRB_NORMAL) |
4146 ep_ring->cycle_state;
4147
4148 /* Only set interrupt on short packet for IN EPs */
4149 if (usb_urb_dir_in(urb))
4150 field |= TRB_ISP;
4151
4152 /* Set the chain bit for all except the last TRB */
4153 if (j < trbs_per_td - 1) {
4154 more_trbs_coming = true;
4155 field |= TRB_CHAIN;
4156 } else {
4157 more_trbs_coming = false;
4158 td->last_trb = ep_ring->enqueue;
4159 td->last_trb_seg = ep_ring->enq_seg;
4160 field |= TRB_IOC;
4161 if (trb_block_event_intr(xhci, num_tds, i, ir))
4162 field |= TRB_BEI;
4163 }
4164 /* Calculate TRB length */
4165 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
4166 if (trb_buff_len > td_remain_len)
4167 trb_buff_len = td_remain_len;
4168
4169 /* Set the TRB length, TD size, & interrupter fields. */
4170 remainder = xhci_td_remainder(xhci, running_total,
4171 trb_buff_len, td_len,
4172 urb, more_trbs_coming);
4173
4174 length_field = TRB_LEN(trb_buff_len) |
4175 TRB_INTR_TARGET(0);
4176
4177 /* xhci 1.1 with ETE uses TD Size field for TBC */
4178 if (first_trb && xep->use_extended_tbc)
4179 length_field |= TRB_TD_SIZE_TBC(burst_count);
4180 else
4181 length_field |= TRB_TD_SIZE(remainder);
4182 first_trb = false;
4183
4184 queue_trb(xhci, ep_ring, more_trbs_coming,
4185 lower_32_bits(addr),
4186 upper_32_bits(addr),
4187 length_field,
4188 field);
4189 running_total += trb_buff_len;
4190
4191 addr += trb_buff_len;
4192 td_remain_len -= trb_buff_len;
4193 }
4194
4195 /* Check TD length */
4196 if (running_total != td_len) {
4197			xhci_err(xhci, "ISOC TD length mismatch\n");
4198 ret = -EINVAL;
4199 goto cleanup;
4200 }
4201 }
4202
4203 /* store the next frame id */
4204 if (HCC_CFC(xhci->hcc_params))
4205 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
4206
4207 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
4208 if (xhci->quirks & XHCI_AMD_PLL_FIX)
4209 usb_amd_quirk_pll_disable();
4210 }
4211 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
4212
4213 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
4214 start_cycle, start_trb);
4215 return 0;
4216cleanup:
4217 /* Clean up a partially enqueued isoc transfer. */
4218
4219 for (i--; i >= 0; i--)
4220 list_del_init(&urb_priv->td[i].td_list);
4221
4222 /* Use the first TD as a temporary variable to turn the TDs we've queued
4223 * into No-ops with a software-owned cycle bit. That way the hardware
4224 * won't accidentally start executing bogus TDs when we partially
4225 * overwrite them. td->first_trb and td->start_seg are already set.
4226 */
4227 urb_priv->td[0].last_trb = ep_ring->enqueue;
4228 /* Every TRB except the first & last will have its cycle bit flipped. */
4229 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);
4230
4231 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
4232 ep_ring->enqueue = urb_priv->td[0].first_trb;
4233 ep_ring->enq_seg = urb_priv->td[0].start_seg;
4234 ep_ring->cycle_state = start_cycle;
4235 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
4236 return ret;
4237}
4238
4239/*
4240 * Check transfer ring to guarantee there is enough room for the urb.
4241 * Update ISO URB start_frame and interval.
4242 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
4243 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
4244 * Contiguous Frame ID is not supported by HC.
4245 */
4246int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
4247 struct urb *urb, int slot_id, unsigned int ep_index)
4248{
4249 struct xhci_virt_device *xdev;
4250 struct xhci_ring *ep_ring;
4251 struct xhci_ep_ctx *ep_ctx;
4252 int start_frame;
4253 int num_tds, num_trbs, i;
4254 int ret;
4255 struct xhci_virt_ep *xep;
4256 int ist;
4257
4258 xdev = xhci->devs[slot_id];
4259 xep = &xhci->devs[slot_id]->eps[ep_index];
4260 ep_ring = xdev->eps[ep_index].ring;
4261 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
4262
4263 num_trbs = 0;
4264 num_tds = urb->number_of_packets;
4265 for (i = 0; i < num_tds; i++)
4266 num_trbs += count_isoc_trbs_needed(urb, i);
4267
4268 /* Check the ring to guarantee there is enough room for the whole urb.
4269	 * Do not insert any TD of the URB into the ring if the check fails.
4270 */
4271 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
4272 num_trbs, mem_flags);
4273 if (ret)
4274 return ret;
4275
4276 /*
4277 * Check interval value. This should be done before we start to
4278 * calculate the start frame value.
4279 */
4280 check_interval(xhci, urb, ep_ctx);
4281
4282 /* Calculate the start frame and put it in urb->start_frame. */
4283 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
4284 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
4285 urb->start_frame = xep->next_frame_id;
4286 goto skip_start_over;
4287 }
4288 }
4289
4290 start_frame = readl(&xhci->run_regs->microframe_index);
4291 start_frame &= 0x3fff;
4292 /*
4293 * Round up to the next frame and consider the time before trb really
4294	 * gets scheduled by hardware.
4295 */
4296 ist = HCS_IST(xhci->hcs_params2) & 0x7;
4297 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
4298 ist <<= 3;
4299 start_frame += ist + XHCI_CFC_DELAY;
4300 start_frame = roundup(start_frame, 8);
4301
4302 /*
4303 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
4304	 * is greater than 8 microframes.
4305 */
4306 if (urb->dev->speed == USB_SPEED_LOW ||
4307 urb->dev->speed == USB_SPEED_FULL) {
4308 start_frame = roundup(start_frame, urb->interval << 3);
4309 urb->start_frame = start_frame >> 3;
4310 } else {
4311 start_frame = roundup(start_frame, urb->interval);
4312 urb->start_frame = start_frame;
4313 }
4314
4315skip_start_over:
4316
4317 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
4318}
4319
4320/**** Command Ring Operations ****/
4321
4322/* Generic function for queueing a command TRB on the command ring.
4323 * Check to make sure there's room on the command ring for one command TRB.
4324 * Also check that there's room reserved for commands that must not fail.
4325 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
4326 * then only check for the number of reserved spots.
4327 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4328 * because the command event handler may want to resubmit a failed command.
4329 */
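/*
 * Illustrative example (not from the original source): if
 * cmd_ring_reserved_trbs is 1, a normal command must find room for
 * 1 + 1 = 2 TRBs in prepare_ring(), while a command_must_succeed command
 * only needs the single reserved slot to still be free.
 */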
4330static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4331 u32 field1, u32 field2,
4332 u32 field3, u32 field4, bool command_must_succeed)
4333{
4334 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4335 int ret;
4336
4337 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4338 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4339 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4340 return -ESHUTDOWN;
4341 }
4342
4343 if (!command_must_succeed)
4344 reserved_trbs++;
4345
4346 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4347 reserved_trbs, GFP_ATOMIC);
4348 if (ret < 0) {
4349 xhci_err(xhci, "ERR: No room for command on command ring\n");
4350 if (command_must_succeed)
4351 xhci_err(xhci, "ERR: Reserved TRB counting for "
4352 "unfailable commands failed.\n");
4353 return ret;
4354 }
4355
4356 cmd->command_trb = xhci->cmd_ring->enqueue;
4357
4358 /* if there are no other commands queued we start the timeout timer */
4359 if (list_empty(&xhci->cmd_list)) {
4360 xhci->current_cmd = cmd;
4361 xhci_mod_cmd_timer(xhci);
4362 }
4363
4364 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4365
4366 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4367 field4 | xhci->cmd_ring->cycle_state);
4368 return 0;
4369}
4370
4371/* Queue a slot enable or disable request on the command ring */
4372int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4373 u32 trb_type, u32 slot_id)
4374{
4375 return queue_command(xhci, cmd, 0, 0, 0,
4376 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4377}
4378
4379/* Queue an address device command TRB */
4380int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4381 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4382{
4383 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4384 upper_32_bits(in_ctx_ptr), 0,
4385 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4386 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4387}
4388
4389int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4390 u32 field1, u32 field2, u32 field3, u32 field4)
4391{
4392 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4393}
4394
4395/* Queue a reset device command TRB */
4396int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4397 u32 slot_id)
4398{
4399 return queue_command(xhci, cmd, 0, 0, 0,
4400 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4401 false);
4402}
4403
4404/* Queue a configure endpoint command TRB */
4405int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4406 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4407 u32 slot_id, bool command_must_succeed)
4408{
4409 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4410 upper_32_bits(in_ctx_ptr), 0,
4411 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4412 command_must_succeed);
4413}
4414
4415/* Queue an evaluate context command TRB */
4416int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4417 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4418{
4419 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4420 upper_32_bits(in_ctx_ptr), 0,
4421 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4422 command_must_succeed);
4423}
4424
4425/*
4426 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
4427 * activity on an endpoint that is about to be suspended.
4428 */
4429int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4430 int slot_id, unsigned int ep_index, int suspend)
4431{
4432 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4433 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4434 u32 type = TRB_TYPE(TRB_STOP_RING);
4435 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4436
4437 return queue_command(xhci, cmd, 0, 0, 0,
4438 trb_slot_id | trb_ep_index | type | trb_suspend, false);
4439}
4440
4441int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4442 int slot_id, unsigned int ep_index,
4443 enum xhci_ep_reset_type reset_type)
4444{
4445 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4446 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4447 u32 type = TRB_TYPE(TRB_RESET_EP);
4448
4449 if (reset_type == EP_SOFT_RESET)
4450 type |= TRB_TSP;
4451
4452 return queue_command(xhci, cmd, 0, 0, 0,
4453 trb_slot_id | trb_ep_index | type, false);
4454			trb_slot_id | trb_ep_index | type, false);
}
55#include <linux/jiffies.h>
56#include <linux/scatterlist.h>
57#include <linux/slab.h>
58#include <linux/dma-mapping.h>
59#include "xhci.h"
60#include "xhci-trace.h"
61
62static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
63 u32 field1, u32 field2,
64 u32 field3, u32 field4, bool command_must_succeed);
65
66/*
67 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
68 * address of the TRB.
69 */
70dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
71 union xhci_trb *trb)
72{
73 unsigned long segment_offset;
74
75 if (!seg || !trb || trb < seg->trbs)
76 return 0;
77 /* offset in TRBs */
78 segment_offset = trb - seg->trbs;
79 if (segment_offset >= TRBS_PER_SEGMENT)
80 return 0;
81 return seg->dma + (segment_offset * sizeof(*trb));
82}
83
84static bool trb_is_noop(union xhci_trb *trb)
85{
86 return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
87}
88
89static bool trb_is_link(union xhci_trb *trb)
90{
91 return TRB_TYPE_LINK_LE32(trb->link.control);
92}
93
94static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
95{
96 return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
97}
98
99static bool last_trb_on_ring(struct xhci_ring *ring,
100 struct xhci_segment *seg, union xhci_trb *trb)
101{
102 return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
103}
104
105static bool link_trb_toggles_cycle(union xhci_trb *trb)
106{
107 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
108}
109
110static bool last_td_in_urb(struct xhci_td *td)
111{
112 struct urb_priv *urb_priv = td->urb->hcpriv;
113
114 return urb_priv->num_tds_done == urb_priv->num_tds;
115}
116
117static bool unhandled_event_trb(struct xhci_ring *ring)
118{
119 return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
120 ring->cycle_state);
121}
122
123static void inc_td_cnt(struct urb *urb)
124{
125 struct urb_priv *urb_priv = urb->hcpriv;
126
127 urb_priv->num_tds_done++;
128}
129
130static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
131{
132 if (trb_is_link(trb)) {
133 /* unchain chained link TRBs */
134 trb->link.control &= cpu_to_le32(~TRB_CHAIN);
135 } else {
136 trb->generic.field[0] = 0;
137 trb->generic.field[1] = 0;
138 trb->generic.field[2] = 0;
139 /* Preserve only the cycle bit of this TRB */
140 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
141 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
142 }
143}
144
145/* Updates trb to point to the next TRB in the ring, and updates seg if the next
146 * TRB is in a new segment. This does not skip over link TRBs, and it does not
147 * affect the ring dequeue or enqueue pointers.
148 */
149static void next_trb(struct xhci_segment **seg,
150 union xhci_trb **trb)
151{
152 if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
153 *seg = (*seg)->next;
154 *trb = ((*seg)->trbs);
155 } else {
156 (*trb)++;
157 }
158}
159
160/*
161 * See Cycle bit rules. SW is the consumer for the event ring only.
162 */
163void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
164{
165 unsigned int link_trb_count = 0;
166
167 /* event ring doesn't have link trbs, check for last trb */
168 if (ring->type == TYPE_EVENT) {
169 if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
170 ring->dequeue++;
171 return;
172 }
173 if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
174 ring->cycle_state ^= 1;
175 ring->deq_seg = ring->deq_seg->next;
176 ring->dequeue = ring->deq_seg->trbs;
177
178 trace_xhci_inc_deq(ring);
179
180 return;
181 }
182
183 /* All other rings have link trbs */
184 if (!trb_is_link(ring->dequeue)) {
185 if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
186 xhci_warn(xhci, "Missing link TRB at end of segment\n");
187 else
188 ring->dequeue++;
189 }
190
191 while (trb_is_link(ring->dequeue)) {
192 ring->deq_seg = ring->deq_seg->next;
193 ring->dequeue = ring->deq_seg->trbs;
194
195 trace_xhci_inc_deq(ring);
196
197 if (link_trb_count++ > ring->num_segs) {
198 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
199 break;
200 }
201 }
202 return;
203}
204
205/*
206 * See Cycle bit rules. SW is the consumer for the event ring only.
207 *
208 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
209 * chain bit is set), then set the chain bit in all the following link TRBs.
210 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
211 * have their chain bit cleared (so that each Link TRB is a separate TD).
212 *
213 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
214 * set, but other sections talk about dealing with the chain bit set. This was
215 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
216 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
217 *
218 * @more_trbs_coming: Will you enqueue more TRBs before calling
219 * prepare_transfer()?
220 */
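/*
 * Illustrative scenario (assumption, not from the original source): if the
 * TRB just enqueued had its chain bit set (mid-TD) and the next TRB is a
 * link TRB, the link TRB inherits TRB_CHAIN and is handed to the hardware,
 * so the TD continues seamlessly in the next segment; if the chain bit was
 * clear and no more TRBs are coming, the link TRB is left for prepare_ring()
 * to give back later.
 */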
221static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
222 bool more_trbs_coming)
223{
224 u32 chain;
225 union xhci_trb *next;
226 unsigned int link_trb_count = 0;
227
228 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
229
230 if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
231 xhci_err(xhci, "Tried to move enqueue past ring segment\n");
232 return;
233 }
234
235 next = ++(ring->enqueue);
236
237	/* Advance the enqueue pointer further if that was a link TRB */
238 while (trb_is_link(next)) {
239
240 /*
241 * If the caller doesn't plan on enqueueing more TDs before
242 * ringing the doorbell, then we don't want to give the link TRB
243 * to the hardware just yet. We'll give the link TRB back in
244 * prepare_ring() just before we enqueue the TD at the top of
245 * the ring.
246 */
247 if (!chain && !more_trbs_coming)
248 break;
249
250 /* If we're not dealing with 0.95 hardware or isoc rings on
251 * AMD 0.96 host, carry over the chain bit of the previous TRB
252 * (which may mean the chain bit is cleared).
253 */
254 if (!xhci_link_chain_quirk(xhci, ring->type)) {
255 next->link.control &= cpu_to_le32(~TRB_CHAIN);
256 next->link.control |= cpu_to_le32(chain);
257 }
258 /* Give this link TRB to the hardware */
259 wmb();
260 next->link.control ^= cpu_to_le32(TRB_CYCLE);
261
262 /* Toggle the cycle bit after the last ring segment. */
263 if (link_trb_toggles_cycle(next))
264 ring->cycle_state ^= 1;
265
266 ring->enq_seg = ring->enq_seg->next;
267 ring->enqueue = ring->enq_seg->trbs;
268 next = ring->enqueue;
269
270 trace_xhci_inc_enq(ring);
271
272 if (link_trb_count++ > ring->num_segs) {
273 xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
274 break;
275 }
276 }
277}
278
279/*
280 * Return number of free normal TRBs from enqueue to dequeue pointer on ring.
281 * Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment.
282 * Only for transfer and command rings where driver is the producer, not for
283 * event rings.
284 */
285static unsigned int xhci_num_trbs_free(struct xhci_ring *ring)
286{
287 struct xhci_segment *enq_seg = ring->enq_seg;
288 union xhci_trb *enq = ring->enqueue;
289 union xhci_trb *last_on_seg;
290 unsigned int free = 0;
291 int i = 0;
292
293 /* Ring might be empty even if enq != deq if enq is left on a link trb */
294 if (trb_is_link(enq)) {
295 enq_seg = enq_seg->next;
296 enq = enq_seg->trbs;
297 }
298
299 /* Empty ring, common case, don't walk the segments */
300 if (enq == ring->dequeue)
301 return ring->num_segs * (TRBS_PER_SEGMENT - 1);
302
303 do {
304 if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
305 return free + (ring->dequeue - enq);
306 last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
307 free += last_on_seg - enq;
308 enq_seg = enq_seg->next;
309 enq = enq_seg->trbs;
310 } while (i++ < ring->num_segs);
311
312 return free;
313}
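/*
 * Worked example (illustrative, assuming TRBS_PER_SEGMENT == 256): on an
 * empty two-segment ring the function returns 2 * 255 = 510 free TRBs.
 * If enqueue sits 10 TRBs behind dequeue in the same segment, it returns
 * those 10 TRBs without walking the remaining segments.
 */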
314
315/*
316 * Check to see if there's room to enqueue num_trbs on the ring and make sure
317 * enqueue pointer will not advance into dequeue segment. See rules above.
318 * return number of new segments needed to ensure this.
319 */
320
321static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
322 unsigned int num_trbs)
323{
324 struct xhci_segment *seg;
325 int trbs_past_seg;
326 int enq_used;
327 int new_segs;
328
329 enq_used = ring->enqueue - ring->enq_seg->trbs;
330
331 /* how many trbs will be queued past the enqueue segment? */
332 trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
333
334 /*
335 * Consider expanding the ring already if num_trbs fills the current
336 * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
337 * the next segment. Avoids confusing full ring with special empty ring
338 * case below
339 */
340 if (trbs_past_seg < 0)
341 return 0;
342
343 /* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
344 if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
345 return 0;
346
347 new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
348 seg = ring->enq_seg;
349
350 while (new_segs > 0) {
351 seg = seg->next;
352 if (seg == ring->deq_seg) {
353 xhci_dbg(xhci, "Adding %d trbs requires expanding ring by %d segments\n",
354 num_trbs, new_segs);
355 return new_segs;
356 }
357 new_segs--;
358 }
359
360 return 0;
361}
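/*
 * Worked example (illustrative, assuming TRBS_PER_SEGMENT == 256): if the
 * enqueue pointer already sits 200 TRBs into its segment and 120 more TRBs
 * are needed, trbs_past_seg = 200 + 120 - 255 = 65, so
 * new_segs = 1 + 65 / 255 = 1. The segment walk above then checks whether
 * the dequeue segment is reached within that one extra segment.
 */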
362
363/* Ring the host controller doorbell after placing a command on the ring */
364void xhci_ring_cmd_db(struct xhci_hcd *xhci)
365{
366 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
367 return;
368
369 xhci_dbg(xhci, "// Ding dong!\n");
370
371 trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
372
373 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
374 /* Flush PCI posted writes */
375 readl(&xhci->dba->doorbell[0]);
376}
377
378static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
379{
380 return mod_delayed_work(system_wq, &xhci->cmd_timer,
381 msecs_to_jiffies(xhci->current_cmd->timeout_ms));
382}
383
384static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
385{
386 return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
387 cmd_list);
388}
389
390/*
391 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
392 * If there are other commands waiting then restart the ring and kick the timer.
393 * This must be called with command ring stopped and xhci->lock held.
394 */
395static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
396 struct xhci_command *cur_cmd)
397{
398 struct xhci_command *i_cmd;
399
400 /* Turn all aborted commands in list to no-ops, then restart */
401 list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
402
403 if (i_cmd->status != COMP_COMMAND_ABORTED)
404 continue;
405
406 i_cmd->status = COMP_COMMAND_RING_STOPPED;
407
408 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
409 i_cmd->command_trb);
410
411 trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
412
413 /*
414		 * The caller waiting for completion is woken when the command
415		 * completion event is received for these no-op commands
416 */
417 }
418
419 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
420
421 /* ring command ring doorbell to restart the command ring */
422 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
423 !(xhci->xhc_state & XHCI_STATE_DYING)) {
424 xhci->current_cmd = cur_cmd;
425 if (cur_cmd)
426 xhci_mod_cmd_timer(xhci);
427 xhci_ring_cmd_db(xhci);
428 }
429}
430
431/* Must be called with xhci->lock held, releases and acquires lock back */
432static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
433{
434 struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
435 union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
436 u64 crcr;
437 int ret;
438
439 xhci_dbg(xhci, "Abort command ring\n");
440
441 reinit_completion(&xhci->cmd_ring_stop_completion);
442
443 /*
444 * The control bits like command stop, abort are located in lower
445 * dword of the command ring control register.
446 * Some controllers require all 64 bits to be written to abort the ring.
447 * Make sure the upper dword is valid, pointing to the next command,
448 * avoiding corrupting the command ring pointer in case the command ring
449 * is stopped by the time the upper dword is written.
450 */
451 next_trb(&new_seg, &new_deq);
452 if (trb_is_link(new_deq))
453 next_trb(&new_seg, &new_deq);
454
455 crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
456 xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
457
458 /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
459 * completion of the Command Abort operation. If CRR is not negated in 5
460 * seconds then driver handles it as if host died (-ENODEV).
461 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
462 * and try to recover a -ETIMEDOUT with a host controller reset.
463 */
464 ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
465 CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
466 XHCI_STATE_REMOVING);
467 if (ret < 0) {
468 xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
469 xhci_halt(xhci);
470 xhci_hc_died(xhci);
471 return ret;
472 }
473 /*
474 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
475 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
476	 * but the completion event is never sent. Wait 2 secs (arbitrary
477 * number) to handle those cases after negation of CMD_RING_RUNNING.
478 */
479 spin_unlock_irqrestore(&xhci->lock, flags);
480 ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
481 msecs_to_jiffies(2000));
482 spin_lock_irqsave(&xhci->lock, flags);
483 if (!ret) {
484 xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
485 xhci_cleanup_command_queue(xhci);
486 } else {
487 xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
488 }
489 return 0;
490}
491
492void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
493 unsigned int slot_id,
494 unsigned int ep_index,
495 unsigned int stream_id)
496{
497 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
498 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
499 unsigned int ep_state = ep->ep_state;
500
501 /* Don't ring the doorbell for this endpoint if there are pending
502 * cancellations because we don't want to interrupt processing.
503 * We don't want to restart any stream rings if there's a set dequeue
504 * pointer command pending because the device can choose to start any
505 * stream once the endpoint is on the HW schedule.
506 */
507 if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
508 (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
509 return;
510
511 trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
512
513 writel(DB_VALUE(ep_index, stream_id), db_addr);
514 /* flush the write */
515 readl(db_addr);
516}
517
518/* Ring the doorbell for any rings with pending URBs */
519static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
520 unsigned int slot_id,
521 unsigned int ep_index)
522{
523 unsigned int stream_id;
524 struct xhci_virt_ep *ep;
525
526 ep = &xhci->devs[slot_id]->eps[ep_index];
527
528 /* A ring has pending URBs if its TD list is not empty */
529 if (!(ep->ep_state & EP_HAS_STREAMS)) {
530 if (ep->ring && !(list_empty(&ep->ring->td_list)))
531 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
532 return;
533 }
534
535 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
536 stream_id++) {
537 struct xhci_stream_info *stream_info = ep->stream_info;
538 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
539 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
540 stream_id);
541 }
542}
543
544void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
545 unsigned int slot_id,
546 unsigned int ep_index)
547{
548 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
549}
550
551static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
552 unsigned int slot_id,
553 unsigned int ep_index)
554{
555 if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
556 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
557 return NULL;
558 }
559 if (ep_index >= EP_CTX_PER_DEV) {
560 xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
561 return NULL;
562 }
563 if (!xhci->devs[slot_id]) {
564 xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
565 return NULL;
566 }
567
568 return &xhci->devs[slot_id]->eps[ep_index];
569}
570
571static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
572 struct xhci_virt_ep *ep,
573 unsigned int stream_id)
574{
575 /* common case, no streams */
576 if (!(ep->ep_state & EP_HAS_STREAMS))
577 return ep->ring;
578
579 if (!ep->stream_info)
580 return NULL;
581
582 if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
583 xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
584 stream_id, ep->vdev->slot_id, ep->ep_index);
585 return NULL;
586 }
587
588 return ep->stream_info->stream_rings[stream_id];
589}
590
591/* Get the right ring for the given slot_id, ep_index and stream_id.
592 * If the endpoint supports streams, boundary check the URB's stream ID.
593 * If the endpoint doesn't support streams, return the singular endpoint ring.
594 */
595struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
596 unsigned int slot_id, unsigned int ep_index,
597 unsigned int stream_id)
598{
599 struct xhci_virt_ep *ep;
600
601 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
602 if (!ep)
603 return NULL;
604
605 return xhci_virt_ep_to_ring(xhci, ep, stream_id);
606}
607
608
609/*
610 * Get the hw dequeue pointer xHC stopped on, either directly from the
611 * endpoint context, or if streams are in use from the stream context.
612 * The returned hw_dequeue contains the lowest four bits with cycle state
613 * and possible stream context type.
614 */
615static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
616 unsigned int ep_index, unsigned int stream_id)
617{
618 struct xhci_ep_ctx *ep_ctx;
619 struct xhci_stream_ctx *st_ctx;
620 struct xhci_virt_ep *ep;
621
622 ep = &vdev->eps[ep_index];
623
624 if (ep->ep_state & EP_HAS_STREAMS) {
625 st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
626 return le64_to_cpu(st_ctx->stream_ring);
627 }
628 ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
629 return le64_to_cpu(ep_ctx->deq);
630}
631
632static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
633 unsigned int slot_id, unsigned int ep_index,
634 unsigned int stream_id, struct xhci_td *td)
635{
636 struct xhci_virt_device *dev = xhci->devs[slot_id];
637 struct xhci_virt_ep *ep = &dev->eps[ep_index];
638 struct xhci_ring *ep_ring;
639 struct xhci_command *cmd;
640 struct xhci_segment *new_seg;
641 union xhci_trb *new_deq;
642 int new_cycle;
643 dma_addr_t addr;
644 u64 hw_dequeue;
645 bool cycle_found = false;
646 bool td_last_trb_found = false;
647 u32 trb_sct = 0;
648 int ret;
649
650 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
651 ep_index, stream_id);
652 if (!ep_ring) {
653 xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
654 stream_id);
655 return -ENODEV;
656 }
657
658 hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
659 new_seg = ep_ring->deq_seg;
660 new_deq = ep_ring->dequeue;
661 new_cycle = hw_dequeue & 0x1;
662
663 /*
664 * We want to find the pointer, segment and cycle state of the new trb
665 * (the one after current TD's end_trb). We know the cycle state at
666 * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are
667 * found.
668 */
669 do {
670 if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
671 == (dma_addr_t)(hw_dequeue & ~0xf)) {
672 cycle_found = true;
673 if (td_last_trb_found)
674 break;
675 }
676 if (new_deq == td->end_trb)
677 td_last_trb_found = true;
678
679 if (cycle_found && trb_is_link(new_deq) &&
680 link_trb_toggles_cycle(new_deq))
681 new_cycle ^= 0x1;
682
683 next_trb(&new_seg, &new_deq);
684
685 /* Search wrapped around, bail out */
686 if (new_deq == ep->ring->dequeue) {
687 xhci_err(xhci, "Error: Failed finding new dequeue state\n");
688 return -EINVAL;
689 }
690
691 } while (!cycle_found || !td_last_trb_found);
692
693 /* Don't update the ring cycle state for the producer (us). */
694 addr = xhci_trb_virt_to_dma(new_seg, new_deq);
695 if (addr == 0) {
696 xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
697 xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
698 return -EINVAL;
699 }
700
701 if ((ep->ep_state & SET_DEQ_PENDING)) {
702 xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
703 &addr);
704 return -EBUSY;
705 }
706
707 /* This function gets called from contexts where it cannot sleep */
708 cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
709 if (!cmd) {
710 xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
711 return -ENOMEM;
712 }
713
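/*
 * For stream endpoints the Set TR Dequeue Pointer TRB carries the stream
 * context type (SCT) in the low bits of the new dequeue pointer, next to
 * the dequeue cycle state bit.
 */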
714 if (stream_id)
715 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
716 ret = queue_command(xhci, cmd,
717 lower_32_bits(addr) | trb_sct | new_cycle,
718 upper_32_bits(addr),
719 STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
720 EP_INDEX_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
721 if (ret < 0) {
722 xhci_free_command(xhci, cmd);
723 return ret;
724 }
725 ep->queued_deq_seg = new_seg;
726 ep->queued_deq_ptr = new_deq;
727
728 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
729 "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
730
731 /* Stop the TD queueing code from ringing the doorbell until
732 * this command completes. The HC won't set the dequeue pointer
733 * if the ring is running, and ringing the doorbell starts the
734 * ring running.
735 */
736 ep->ep_state |= SET_DEQ_PENDING;
737 xhci_ring_cmd_db(xhci);
738 return 0;
739}
740
741/* flip_cycle means flip the cycle bit of all but the first and last TRB.
742 * (The last TRB actually points to the ring enqueue pointer, which is not part
743 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
744 */
745static void td_to_noop(struct xhci_td *td, bool flip_cycle)
746{
747 struct xhci_segment *seg = td->start_seg;
748 union xhci_trb *trb = td->start_trb;
749
750 while (1) {
751 trb_to_noop(trb, TRB_TR_NOOP);
752
753 /* flip cycle if asked to */
754 if (flip_cycle && trb != td->start_trb && trb != td->end_trb)
755 trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
756
757 if (trb == td->end_trb)
758 break;
759
760 next_trb(&seg, &trb);
761 }
762}
763
764static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
765 struct xhci_td *cur_td, int status)
766{
767 struct urb *urb = cur_td->urb;
768 struct urb_priv *urb_priv = urb->hcpriv;
769 struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
770
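/*
 * If this was the last isochronous URB on the host, the AMD PLL quirk
 * re-enables the PLL that is kept disabled while isoc transfers run.
 */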
771 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
772 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
773 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
774 if (xhci->quirks & XHCI_AMD_PLL_FIX)
775 usb_amd_quirk_pll_enable();
776 }
777 }
778 xhci_urb_free_priv(urb_priv);
779 usb_hcd_unlink_urb_from_ep(hcd, urb);
780 trace_xhci_urb_giveback(urb);
781 usb_hcd_giveback_urb(hcd, urb, status);
782}
783
784static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
785 struct xhci_ring *ring, struct xhci_td *td)
786{
787 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
788 struct xhci_segment *seg = td->bounce_seg;
789 struct urb *urb = td->urb;
790 size_t len;
791
792 if (!ring || !seg || !urb)
793 return;
794
795 if (usb_urb_dir_out(urb)) {
796 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
797 DMA_TO_DEVICE);
798 return;
799 }
800
801 dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
802 DMA_FROM_DEVICE);
803 /* for in transfers we need to copy the data from bounce to sg */
804 if (urb->num_sgs) {
805 len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
806 seg->bounce_len, seg->bounce_offs);
807 if (len != seg->bounce_len)
808 xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
809 len, seg->bounce_len);
810 } else {
811 memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
812 seg->bounce_len);
813 }
814 seg->bounce_len = 0;
815 seg->bounce_offs = 0;
816}
817
818static void xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
819 struct xhci_ring *ep_ring, int status)
820{
821 struct urb *urb = NULL;
822
823 /* Clean up the endpoint's TD list */
824 urb = td->urb;
825
826 /* if a bounce buffer was used to align this td then unmap it */
827 xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
828
829 /* Do one last check of the actual transfer length.
830 * If the host controller said we transferred more data than the buffer
831 * length, urb->actual_length will be a very big number (since it's
832 * unsigned). Play it safe and say we didn't transfer anything.
833 */
834 if (urb->actual_length > urb->transfer_buffer_length) {
835 xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
836 urb->transfer_buffer_length, urb->actual_length);
837 urb->actual_length = 0;
838 status = 0;
839 }
840 /* TD might be removed from td_list if we are giving back a cancelled URB */
841 if (!list_empty(&td->td_list))
842 list_del_init(&td->td_list);
843 /* Giving back a cancelled URB, or a TD slated for cancellation that completed anyway */
844 if (!list_empty(&td->cancelled_td_list))
845 list_del_init(&td->cancelled_td_list);
846
847 inc_td_cnt(urb);
848 /* Giveback the urb when all the tds are completed */
849 if (last_td_in_urb(td)) {
850 if ((urb->actual_length != urb->transfer_buffer_length &&
851 (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
852 (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
853 xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
854 urb, urb->actual_length,
855 urb->transfer_buffer_length, status);
856
857 /* set isoc urb status to 0 just like EHCI, UHCI, and OHCI do */
858 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
859 status = 0;
860 xhci_giveback_urb_in_irq(xhci, td, status);
861 }
862}
863
864/* Give back previous TD and move on to the next TD. */
865static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ring,
866 u32 status)
867{
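/* Move the SW dequeue pointer to this TD's last TRB, then step past it */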
868 ring->dequeue = td->end_trb;
869 ring->deq_seg = td->end_seg;
870 inc_deq(xhci, ring);
871
872 xhci_td_cleanup(xhci, td, ring, status);
873}
874
875/* Complete the cancelled URBs we unlinked from td_list. */
876static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
877{
878 struct xhci_ring *ring;
879 struct xhci_td *td, *tmp_td;
880
881 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
882 cancelled_td_list) {
883
884 ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
885
886 if (td->cancel_status == TD_CLEARED) {
887 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
888 __func__, td->urb);
889 xhci_td_cleanup(ep->xhci, td, ring, td->status);
890 } else {
891 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
892 __func__, td->urb, td->cancel_status);
893 }
894 if (ep->xhci->xhc_state & XHCI_STATE_DYING)
895 return;
896 }
897}
898
899static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
900 unsigned int ep_index, enum xhci_ep_reset_type reset_type)
901{
902 struct xhci_command *command;
903 int ret = 0;
904
905 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
906 if (!command) {
907 ret = -ENOMEM;
908 goto done;
909 }
910
911 xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
912 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
913 ep_index, slot_id);
914
915 ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
916done:
917 if (ret)
918 xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
919 slot_id, ep_index, ret);
920 return ret;
921}
922
923static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
924 struct xhci_virt_ep *ep,
925 struct xhci_td *td,
926 enum xhci_ep_reset_type reset_type)
927{
928 unsigned int slot_id = ep->vdev->slot_id;
929 int err;
930
931 /*
932 * Avoid resetting endpoint if link is inactive. Can cause host hang.
933 * Device will be reset soon to recover the link so don't do anything
934 */
935 if (ep->vdev->flags & VDEV_PORT_ERROR)
936 return -ENODEV;
937
938 /* add td to cancelled list and let reset ep handler take care of it */
939 if (reset_type == EP_HARD_RESET) {
940 ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
941 if (td && list_empty(&td->cancelled_td_list)) {
942 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
943 td->cancel_status = TD_HALTED;
944 }
945 }
946
947 if (ep->ep_state & EP_HALTED) {
948 xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
949 ep->ep_index);
950 return 0;
951 }
952
953 err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
954 if (err)
955 return err;
956
957 ep->ep_state |= EP_HALTED;
958
959 xhci_ring_cmd_db(xhci);
960
961 return 0;
962}
963
964/*
965 * Fix up the ep ring first, so HW stops executing cancelled TDs.
966 * We have the xHCI lock, so nothing can modify this list until we drop it.
967 * We're also in the event handler, so we can't get re-interrupted if another
968 * Stop Endpoint command completes.
969 *
970 * Only call this when the ring is not in a running state.
971 */
972
973static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
974{
975 struct xhci_hcd *xhci;
976 struct xhci_td *td = NULL;
977 struct xhci_td *tmp_td = NULL;
978 struct xhci_td *cached_td = NULL;
979 struct xhci_ring *ring;
980 u64 hw_deq;
981 unsigned int slot_id = ep->vdev->slot_id;
982 int err;
983
984 /*
985 * This is not going to work if the hardware is changing its dequeue
986 * pointers as we look at them. The completion handler will call us again later.
987 */
988 if (ep->ep_state & SET_DEQ_PENDING)
989 return 0;
990
991 xhci = ep->xhci;
992
993 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
994 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
995 "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
996 (unsigned long long)xhci_trb_virt_to_dma(
997 td->start_seg, td->start_trb),
998 td->urb->stream_id, td->urb);
999 list_del_init(&td->td_list);
1000 ring = xhci_urb_to_transfer_ring(xhci, td->urb);
1001 if (!ring) {
1002 xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
1003 td->urb, td->urb->stream_id);
1004 continue;
1005 }
1006 /*
1007 * If a ring stopped on the TD we need to cancel then we have to
1008 * move the xHC endpoint ring dequeue pointer past this TD.
1009 * Rings halted due to STALL may show hw_deq is past the stalled
1010 * TD, but still require a set TR Deq command to flush xHC cache.
1011 */
1012 hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
1013 td->urb->stream_id);
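/* Mask off cycle state and stream context type bits to get the plain dequeue address */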
1014 hw_deq &= ~0xf;
1015
1016 if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) {
1017 switch (td->cancel_status) {
1018 case TD_CLEARED: /* TD is already no-op */
1019 case TD_CLEARING_CACHE: /* set TR deq command already queued */
1020 break;
1021 case TD_DIRTY: /* TD is cached, clear it */
1022 case TD_HALTED:
1023 case TD_CLEARING_CACHE_DEFERRED:
1024 if (cached_td) {
1025 if (cached_td->urb->stream_id != td->urb->stream_id) {
1026 /* Multiple streams case, defer move dq */
1027 xhci_dbg(xhci,
1028 "Move dq deferred: stream %u URB %p\n",
1029 td->urb->stream_id, td->urb);
1030 td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
1031 break;
1032 }
1033
1034 /* Should never happen, but clear the TD if it does */
1035 xhci_warn(xhci,
1036 "Found multiple active URBs %p and %p in stream %u?\n",
1037 td->urb, cached_td->urb,
1038 td->urb->stream_id);
1039 td_to_noop(cached_td, false);
1040 cached_td->cancel_status = TD_CLEARED;
1041 }
1042 td_to_noop(td, false);
1043 td->cancel_status = TD_CLEARING_CACHE;
1044 cached_td = td;
1045 break;
1046 }
1047 } else {
1048 td_to_noop(td, false);
1049 td->cancel_status = TD_CLEARED;
1050 }
1051 }
1052
1053 /* If there's no need to move the dequeue pointer then we're done */
1054 if (!cached_td)
1055 return 0;
1056
1057 err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
1058 cached_td->urb->stream_id,
1059 cached_td);
1060 if (err) {
1061 /* Failed to move past cached td, just set cached TDs to no-op */
1062 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
1063 /*
1064 * Deferred TDs need to have the deq pointer set after the above command
1065 * completes, so if that failed we just give up on all of them (and
1066 * complain loudly since this could cause issues due to caching).
1067 */
1068 if (td->cancel_status != TD_CLEARING_CACHE &&
1069 td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
1070 continue;
1071 xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
1072 td->urb);
1073 td_to_noop(td, false);
1074 td->cancel_status = TD_CLEARED;
1075 }
1076 }
1077 return 0;
1078}
1079
1080/*
1081 * Erase queued TDs from transfer ring(s) and give back those the xHC didn't
1082 * stop on. If necessary, queue commands to move the xHC off cancelled TDs it
1083 * stopped on. Those will be given back later when the commands complete.
1084 *
1085 * Call under xhci->lock on a stopped endpoint.
1086 */
1087void xhci_process_cancelled_tds(struct xhci_virt_ep *ep)
1088{
1089 xhci_invalidate_cancelled_tds(ep);
1090 xhci_giveback_invalidated_tds(ep);
1091}
1092
1093/*
1094 * Returns the TD the endpoint ring halted on.
1095 * Only call for non-running rings without streams.
1096 */
1097static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
1098{
1099 struct xhci_td *td;
1100 u64 hw_deq;
1101
1102 if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
1103 hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
1104 hw_deq &= ~0xf;
1105 td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
1106 if (trb_in_td(ep->xhci, td, hw_deq, false))
1107 return td;
1108 }
1109 return NULL;
1110}
1111
1112/*
1113 * When we get a command completion for a Stop Endpoint Command, we need to
1114 * unlink any cancelled TDs from the ring. There are two ways to do that:
1115 *
1116 * 1. If the HW was in the middle of processing the TD that needs to be
1117 * cancelled, then we must move the ring's dequeue pointer past the last TRB
1118 * in the TD with a Set Dequeue Pointer Command.
1119 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
1120 * bit cleared) so that the HW will skip over them.
1121 */
1122static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
1123 union xhci_trb *trb, u32 comp_code)
1124{
1125 unsigned int ep_index;
1126 struct xhci_virt_ep *ep;
1127 struct xhci_ep_ctx *ep_ctx;
1128 struct xhci_td *td = NULL;
1129 enum xhci_ep_reset_type reset_type;
1130 struct xhci_command *command;
1131 int err;
1132
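/*
 * A Stop Endpoint command with the Suspend (SP) bit set was issued to
 * suspend the device; there are no cancelled TDs to clean up here.
 */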
1133 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
1134 if (!xhci->devs[slot_id])
1135 xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
1136 slot_id);
1137 return;
1138 }
1139
1140 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1141 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1142 if (!ep)
1143 return;
1144
1145 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1146
1147 trace_xhci_handle_cmd_stop_ep(ep_ctx);
1148
1149 if (comp_code == COMP_CONTEXT_STATE_ERROR) {
1150 /*
1151 * If stop endpoint command raced with a halting endpoint we need to
1152 * reset the host side endpoint first.
1153 * If the TD we halted on isn't cancelled the TD should be given back
1154 * with a proper error code, and the ring dequeue moved past the TD.
1155 * In the streams case we can't find hw_deq or the TD we halted on, so do
1156 * a soft reset.
1157 *
1158 * The proper error code is unknown here: it would be -EPIPE if the device
1159 * side of the endpoint halted (aka STALL), and -EPROTO if not (transaction
1160 * error). We use -EPROTO; if the device is stalled it should return a stall
1161 * error on the next transfer, which then results in -EPIPE, and the device
1162 * side stall is noted and cleared by the class driver.
1163 */
1164 switch (GET_EP_CTX_STATE(ep_ctx)) {
1165 case EP_STATE_HALTED:
1166 xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
1167 if (ep->ep_state & EP_HAS_STREAMS) {
1168 reset_type = EP_SOFT_RESET;
1169 } else {
1170 reset_type = EP_HARD_RESET;
1171 td = find_halted_td(ep);
1172 if (td)
1173 td->status = -EPROTO;
1174 }
1175 /* reset ep, reset handler cleans up cancelled tds */
1176 err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
1177 if (err)
1178 break;
1179 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1180 return;
1181 case EP_STATE_STOPPED:
1182 /*
1183 * Per xHCI 4.6.9, Stop Endpoint command on a Stopped
1184 * EP is a Context State Error, and EP stays Stopped.
1185 *
1186 * But maybe it failed on Halted, and somebody ran Reset
1187 * Endpoint later. EP state is now Stopped and EP_HALTED
1188 * still set because Reset EP handler will run after us.
1189 */
1190 if (ep->ep_state & EP_HALTED)
1191 break;
1192 /*
1193 * On some HCs EP state remains Stopped for some tens of
1194 * us to a few ms or more after a doorbell ring, and any
1195 * new Stop Endpoint fails without aborting the restart.
1196 * This handler may run quickly enough to still see this
1197 * Stopped state, but it will soon change to Running.
1198 *
1199 * Assume this bug on unexpected Stop Endpoint failures.
1200 * Keep retrying until the EP starts and stops again, on
1201 * chips where this is known to help. Wait for 100ms.
1202 */
1203 if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
1204 break;
1205 fallthrough;
1206 case EP_STATE_RUNNING:
1207 /* Race, HW handled stop ep cmd before ep was running */
1208 xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
1209 GET_EP_CTX_STATE(ep_ctx));
1210
1211 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1212 if (!command) {
1213 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1214 return;
1215 }
1216 xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
1217 xhci_ring_cmd_db(xhci);
1218
1219 return;
1220 default:
1221 break;
1222 }
1223 }
1224
1225 /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
1226 xhci_invalidate_cancelled_tds(ep);
1227 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1228
1229 /* Otherwise ring the doorbell(s) to restart queued transfers */
1230 xhci_giveback_invalidated_tds(ep);
1231 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1232}
1233
1234static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
1235{
1236 struct xhci_td *cur_td;
1237 struct xhci_td *tmp;
1238
1239 list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
1240 list_del_init(&cur_td->td_list);
1241
1242 if (!list_empty(&cur_td->cancelled_td_list))
1243 list_del_init(&cur_td->cancelled_td_list);
1244
1245 xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
1246
1247 inc_td_cnt(cur_td->urb);
1248 if (last_td_in_urb(cur_td))
1249 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1250 }
1251}
1252
1253static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
1254 int slot_id, int ep_index)
1255{
1256 struct xhci_td *cur_td;
1257 struct xhci_td *tmp;
1258 struct xhci_virt_ep *ep;
1259 struct xhci_ring *ring;
1260
1261 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1262 if (!ep)
1263 return;
1264
1265 if ((ep->ep_state & EP_HAS_STREAMS) ||
1266 (ep->ep_state & EP_GETTING_NO_STREAMS)) {
1267 int stream_id;
1268
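/* Stream ID 0 is reserved, so per-stream rings start at stream 1 */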
1269 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
1270 stream_id++) {
1271 ring = ep->stream_info->stream_rings[stream_id];
1272 if (!ring)
1273 continue;
1274
1275 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1276 "Killing URBs for slot ID %u, ep index %u, stream %u",
1277 slot_id, ep_index, stream_id);
1278 xhci_kill_ring_urbs(xhci, ring);
1279 }
1280 } else {
1281 ring = ep->ring;
1282 if (!ring)
1283 return;
1284 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1285 "Killing URBs for slot ID %u, ep index %u",
1286 slot_id, ep_index);
1287 xhci_kill_ring_urbs(xhci, ring);
1288 }
1289
1290 list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
1291 cancelled_td_list) {
1292 list_del_init(&cur_td->cancelled_td_list);
1293 inc_td_cnt(cur_td->urb);
1294
1295 if (last_td_in_urb(cur_td))
1296 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1297 }
1298}
1299
1300/*
1301 * Host controller died; register reads return 0xffffffff.
1302 * Complete pending commands, mark them ABORTED.
1303 * URBs need to be given back as the usb core might be waiting with device locks
1304 * held for the URBs to finish during device disconnect, blocking host remove.
1305 *
1306 * Call with xhci->lock held.
1307 * The lock is released and re-acquired while giving back URBs.
1308 */
1309void xhci_hc_died(struct xhci_hcd *xhci)
1310{
1311 int i, j;
1312
1313 if (xhci->xhc_state & XHCI_STATE_DYING)
1314 return;
1315
1316 xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
1317 xhci->xhc_state |= XHCI_STATE_DYING;
1318
1319 xhci_cleanup_command_queue(xhci);
1320
1321 /* return any pending urbs, remove may be waiting for them */
1322 for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
1323 if (!xhci->devs[i])
1324 continue;
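/* Each slot context covers up to 31 endpoints (ep_index 0..30) */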
1325 for (j = 0; j < 31; j++)
1326 xhci_kill_endpoint_urbs(xhci, i, j);
1327 }
1328
1329 /* inform usb core hc died if PCI remove isn't already handling it */
1330 if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
1331 usb_hc_died(xhci_to_hcd(xhci));
1332}
1333
1334static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1335 struct xhci_virt_device *dev,
1336 struct xhci_ring *ep_ring,
1337 unsigned int ep_index)
1338{
1339 union xhci_trb *dequeue_temp;
1340
1341 dequeue_temp = ep_ring->dequeue;
1342
1343 /* If we get two back-to-back stalls, and the first stalled transfer
1344 * ends just before a link TRB, the dequeue pointer will be left on
1345 * the link TRB by the code in the while loop. So we have to update
1346 * the dequeue pointer one segment further, or we'll jump off
1347 * the segment into la-la-land.
1348 */
1349 if (trb_is_link(ep_ring->dequeue)) {
1350 ep_ring->deq_seg = ep_ring->deq_seg->next;
1351 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1352 }
1353
1354 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1355 /* We have more usable TRBs */
1356 ep_ring->dequeue++;
1357 if (trb_is_link(ep_ring->dequeue)) {
1358 if (ep_ring->dequeue ==
1359 dev->eps[ep_index].queued_deq_ptr)
1360 break;
1361 ep_ring->deq_seg = ep_ring->deq_seg->next;
1362 ep_ring->dequeue = ep_ring->deq_seg->trbs;
1363 }
1364 if (ep_ring->dequeue == dequeue_temp) {
1365 xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1366 break;
1367 }
1368 }
1369}
1370
1371/*
1372 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
1373 * we need to clear the set deq pending flag in the endpoint ring state, so that
1374 * the TD queueing code can ring the doorbell again. We also need to ring the
1375 * endpoint doorbell to restart the ring, but only if there aren't more
1376 * cancellations pending.
1377 */
1378static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1379 union xhci_trb *trb, u32 cmd_comp_code)
1380{
1381 unsigned int ep_index;
1382 unsigned int stream_id;
1383 struct xhci_ring *ep_ring;
1384 struct xhci_virt_ep *ep;
1385 struct xhci_ep_ctx *ep_ctx;
1386 struct xhci_slot_ctx *slot_ctx;
1387 struct xhci_stream_ctx *stream_ctx;
1388 struct xhci_td *td, *tmp_td;
1389
1390 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1391 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1392 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1393 if (!ep)
1394 return;
1395
1396 ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
1397 if (!ep_ring) {
1398 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
1399 stream_id);
1400 /* XXX: Harmless??? */
1401 goto cleanup;
1402 }
1403
1404 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1405 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
1406 trace_xhci_handle_cmd_set_deq(slot_ctx);
1407 trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
1408
1409 if (ep->ep_state & EP_HAS_STREAMS) {
1410 stream_ctx = &ep->stream_info->stream_ctx_array[stream_id];
1411 trace_xhci_handle_cmd_set_deq_stream(ep->stream_info, stream_id);
1412 }
1413
1414 if (cmd_comp_code != COMP_SUCCESS) {
1415 unsigned int ep_state;
1416 unsigned int slot_state;
1417
1418 switch (cmd_comp_code) {
1419 case COMP_TRB_ERROR:
1420 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
1421 break;
1422 case COMP_CONTEXT_STATE_ERROR:
1423 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
1424 ep_state = GET_EP_CTX_STATE(ep_ctx);
1425 slot_state = le32_to_cpu(slot_ctx->dev_state);
1426 slot_state = GET_SLOT_STATE(slot_state);
1427 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1428 "Slot state = %u, EP state = %u",
1429 slot_state, ep_state);
1430 break;
1431 case COMP_SLOT_NOT_ENABLED_ERROR:
1432 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1433 slot_id);
1434 break;
1435 default:
1436 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1437 cmd_comp_code);
1438 break;
1439 }
1440 /* OK, what do we do now? The endpoint state is hosed, and we
1441 * should never get to this point if the synchronization between
1442 * queueing and endpoint state is correct. This might happen
1443 * if the device gets disconnected after we've finished
1444 * cancelling URBs, which might not be an error...
1445 */
1446 } else {
1447 u64 deq;
1448 /* 4.6.10 deq ptr is written to the stream ctx for streams */
1449 if (ep->ep_state & EP_HAS_STREAMS) {
1450 deq = le64_to_cpu(stream_ctx->stream_ring) & SCTX_DEQ_MASK;
1451
1452 /*
1453 * Cadence xHCI controllers store some endpoint state
1454 * information within Rsvd0 fields of Stream Endpoint
1455 * context. This field is not cleared by the Set TR
1456 * Dequeue Pointer command, which causes the XDMA to skip
1457 * over the transfer ring and leads to data loss on the
1458 * stream pipe.
1459 * To fix this issue the driver must clear the Rsvd0 field.
1460 */
1461 if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
1462 stream_ctx->reserved[0] = 0;
1463 stream_ctx->reserved[1] = 0;
1464 }
1465 } else {
1466 deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1467 }
1468 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1469 "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1470 if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1471 ep->queued_deq_ptr) == deq) {
1472 /* Update the ring's dequeue segment and dequeue pointer
1473 * to reflect the new position.
1474 */
1475 update_ring_for_set_deq_completion(xhci, ep->vdev,
1476 ep_ring, ep_index);
1477 } else {
1478 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1479 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1480 ep->queued_deq_seg, ep->queued_deq_ptr);
1481 }
1482 }
1483 /* HW cached TDs cleared from cache, give them back */
1484 list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
1485 cancelled_td_list) {
1486 ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
1487 if (td->cancel_status == TD_CLEARING_CACHE) {
1488 td->cancel_status = TD_CLEARED;
1489 xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
1490 __func__, td->urb);
1491 xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
1492 } else {
1493 xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
1494 __func__, td->urb, td->cancel_status);
1495 }
1496 }
1497cleanup:
1498 ep->ep_state &= ~SET_DEQ_PENDING;
1499 ep->queued_deq_seg = NULL;
1500 ep->queued_deq_ptr = NULL;
1501
1502 /* Check for deferred or newly cancelled TDs */
1503 if (!list_empty(&ep->cancelled_td_list)) {
1504 xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
1505 __func__);
1506 xhci_invalidate_cancelled_tds(ep);
1507 /* Try to restart the endpoint if all is done */
1508 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1509 /* Start giving back any TDs invalidated above */
1510 xhci_giveback_invalidated_tds(ep);
1511 } else {
1512 /* Restart any rings with pending URBs */
1513 xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
1514 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1515 }
1516}
1517
1518static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1519 union xhci_trb *trb, u32 cmd_comp_code)
1520{
1521 struct xhci_virt_ep *ep;
1522 struct xhci_ep_ctx *ep_ctx;
1523 unsigned int ep_index;
1524
1525 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1526 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1527 if (!ep)
1528 return;
1529
1530 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1531 trace_xhci_handle_cmd_reset_ep(ep_ctx);
1532
1533 /* This command will only fail if the endpoint wasn't halted,
1534 * but we don't care.
1535 */
1536 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1537 "Ignoring reset ep completion code of %u", cmd_comp_code);
1538
1539 /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
1540 xhci_invalidate_cancelled_tds(ep);
1541
1542 /* Clear our internal halted state */
1543 ep->ep_state &= ~EP_HALTED;
1544
1545 xhci_giveback_invalidated_tds(ep);
1546
1547 /* if this was a soft reset, then restart */
1548 if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
1549 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1550}
1551
1552static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *command,
1553 u32 cmd_comp_code)
1554{
1555 if (cmd_comp_code == COMP_SUCCESS)
1556 command->slot_id = slot_id;
1557 else
1558 command->slot_id = 0;
1559}
1560
1561static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1562{
1563 struct xhci_virt_device *virt_dev;
1564 struct xhci_slot_ctx *slot_ctx;
1565
1566 virt_dev = xhci->devs[slot_id];
1567 if (!virt_dev)
1568 return;
1569
1570 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1571 trace_xhci_handle_cmd_disable_slot(slot_ctx);
1572
1573 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1574 /* Delete default control endpoint resources */
1575 xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1576}
1577
1578static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id)
1579{
1580 struct xhci_virt_device *virt_dev;
1581 struct xhci_input_control_ctx *ctrl_ctx;
1582 struct xhci_ep_ctx *ep_ctx;
1583 unsigned int ep_index;
1584 u32 add_flags;
1585
1586 /*
1587 * Configure endpoint commands can come from the USB core configuration
1588 * or alt setting changes, or when streams were being configured.
1589 */
1590
1591 virt_dev = xhci->devs[slot_id];
1592 if (!virt_dev)
1593 return;
1594 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1595 if (!ctrl_ctx) {
1596 xhci_warn(xhci, "Could not get input context, bad type.\n");
1597 return;
1598 }
1599
1600 add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1601
1602 /* Input ctx add_flags are the endpoint index plus one */
1603 ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1604
1605 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
1606 trace_xhci_handle_cmd_config_ep(ep_ctx);
1609}
1610
1611static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
1612{
1613 struct xhci_virt_device *vdev;
1614 struct xhci_slot_ctx *slot_ctx;
1615
1616 vdev = xhci->devs[slot_id];
1617 if (!vdev)
1618 return;
1619 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1620 trace_xhci_handle_cmd_addr_dev(slot_ctx);
1621}
1622
1623static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
1624{
1625 struct xhci_virt_device *vdev;
1626 struct xhci_slot_ctx *slot_ctx;
1627
1628 vdev = xhci->devs[slot_id];
1629 if (!vdev) {
1630 xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
1631 slot_id);
1632 return;
1633 }
1634 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1635 trace_xhci_handle_cmd_reset_dev(slot_ctx);
1636
1637 xhci_dbg(xhci, "Completed reset device command.\n");
1638}
1639
1640static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1641 struct xhci_event_cmd *event)
1642{
1643 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1644 xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
1645 return;
1646 }
1647 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1648 "NEC firmware version %2x.%02x",
1649 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1650 NEC_FW_MINOR(le32_to_cpu(event->status)));
1651}
1652
1653static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1654{
1655 list_del(&cmd->cmd_list);
1656
1657 if (cmd->completion) {
1658 cmd->status = status;
1659 complete(cmd->completion);
1660 } else {
1661 kfree(cmd);
1662 }
1663}
1664
1665void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1666{
1667 struct xhci_command *cur_cmd, *tmp_cmd;
1668 xhci->current_cmd = NULL;
1669 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1670 xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1671}
1672
1673void xhci_handle_command_timeout(struct work_struct *work)
1674{
1675 struct xhci_hcd *xhci;
1676 unsigned long flags;
1677 char str[XHCI_MSG_MAX];
1678 u64 hw_ring_state;
1679 u32 cmd_field3;
1680 u32 usbsts;
1681
1682 xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
1683
1684 spin_lock_irqsave(&xhci->lock, flags);
1685
1686 /*
1687 * If timeout work is pending, or current_cmd is NULL, it means we
1688 * raced with command completion. Command is handled so just return.
1689 */
1690 if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
1691 spin_unlock_irqrestore(&xhci->lock, flags);
1692 return;
1693 }
1694
1695 cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
1696 usbsts = readl(&xhci->op_regs->status);
1697 xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));
1698
1699 /* Bail out and tear down xhci if a stop endpoint command failed */
1700 if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
1701 struct xhci_virt_ep *ep;
1702
1703 xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");
1704
1705 ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
1706 TRB_TO_EP_INDEX(cmd_field3));
1707 if (ep)
1708 ep->ep_state &= ~EP_STOP_CMD_PENDING;
1709
1710 xhci_halt(xhci);
1711 xhci_hc_died(xhci);
1712 goto time_out_completed;
1713 }
1714
1715 /* mark this command to be cancelled */
1716 xhci->current_cmd->status = COMP_COMMAND_ABORTED;
1717
1718 /* Make sure command ring is running before aborting it */
1719 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
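/* A read of all ones means the host controller is gone (e.g. surprise removal) */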
1720 if (hw_ring_state == ~(u64)0) {
1721 xhci_hc_died(xhci);
1722 goto time_out_completed;
1723 }
1724
1725 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1726 (hw_ring_state & CMD_RING_RUNNING)) {
1727 /* Prevent new doorbell, and start command abort */
1728 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
1729 xhci_dbg(xhci, "Command timeout\n");
1730 xhci_abort_cmd_ring(xhci, flags);
1731 goto time_out_completed;
1732 }
1733
1734 /* host removed. Bail out */
1735 if (xhci->xhc_state & XHCI_STATE_REMOVING) {
1736 xhci_dbg(xhci, "host removed, ring start fail?\n");
1737 xhci_cleanup_command_queue(xhci);
1738
1739 goto time_out_completed;
1740 }
1741
1742 /* command timeout on stopped ring, ring can't be aborted */
1743 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1744 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1745
1746time_out_completed:
1747 spin_unlock_irqrestore(&xhci->lock, flags);
1748 return;
1749}
1750
1751static void handle_cmd_completion(struct xhci_hcd *xhci,
1752 struct xhci_event_cmd *event)
1753{
1754 unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1755 u64 cmd_dma;
1756 dma_addr_t cmd_dequeue_dma;
1757 u32 cmd_comp_code;
1758 union xhci_trb *cmd_trb;
1759 struct xhci_command *cmd;
1760 u32 cmd_type;
1761
1762 if (slot_id >= MAX_HC_SLOTS) {
1763 xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
1764 return;
1765 }
1766
1767 cmd_dma = le64_to_cpu(event->cmd_trb);
1768 cmd_trb = xhci->cmd_ring->dequeue;
1769
1770 trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic, cmd_dma);
1771
1772 cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1773
1774 /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1775 if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
1776 complete_all(&xhci->cmd_ring_stop_completion);
1777 return;
1778 }
1779
1780 cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1781 cmd_trb);
1782 /*
1783 * Check whether the completion event is for our internal kept
1784 * command.
1785 */
1786 if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
1787 xhci_warn(xhci,
1788 "ERROR mismatched command completion event\n");
1789 return;
1790 }
1791
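/* Commands complete in queue order, so this event should be for the oldest command on our list */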
1792 cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
1793
1794 cancel_delayed_work(&xhci->cmd_timer);
1795
1796 if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1797 xhci_err(xhci,
1798 "Command completion event does not match command\n");
1799 return;
1800 }
1801
1802 /*
1803 * Host aborted the command ring, check if the current command was
1804 * supposed to be aborted, otherwise continue normally.
1805 * The command ring is stopped now, but the xHC will issue a Command
1806 * Ring Stopped event which will cause us to restart it.
1807 */
1808 if (cmd_comp_code == COMP_COMMAND_ABORTED) {
1809 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1810 if (cmd->status == COMP_COMMAND_ABORTED) {
1811 if (xhci->current_cmd == cmd)
1812 xhci->current_cmd = NULL;
1813 goto event_handled;
1814 }
1815 }
1816
1817 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1818 switch (cmd_type) {
1819 case TRB_ENABLE_SLOT:
1820 xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code);
1821 break;
1822 case TRB_DISABLE_SLOT:
1823 xhci_handle_cmd_disable_slot(xhci, slot_id);
1824 break;
1825 case TRB_CONFIG_EP:
1826 if (!cmd->completion)
1827 xhci_handle_cmd_config_ep(xhci, slot_id);
1828 break;
1829 case TRB_EVAL_CONTEXT:
1830 break;
1831 case TRB_ADDR_DEV:
1832 xhci_handle_cmd_addr_dev(xhci, slot_id);
1833 break;
1834 case TRB_STOP_RING:
1835 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1836 le32_to_cpu(cmd_trb->generic.field[3])));
1837 if (!cmd->completion)
1838 xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
1839 cmd_comp_code);
1840 break;
1841 case TRB_SET_DEQ:
1842 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1843 le32_to_cpu(cmd_trb->generic.field[3])));
1844 xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1845 break;
1846 case TRB_CMD_NOOP:
1847 /* Is this an aborted command turned to NO-OP? */
1848 if (cmd->status == COMP_COMMAND_RING_STOPPED)
1849 cmd_comp_code = COMP_COMMAND_RING_STOPPED;
1850 break;
1851 case TRB_RESET_EP:
1852 WARN_ON(slot_id != TRB_TO_SLOT_ID(
1853 le32_to_cpu(cmd_trb->generic.field[3])));
1854 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1855 break;
1856 case TRB_RESET_DEV:
1857 /* SLOT_ID field in reset device cmd completion event TRB is 0.
1858 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1859 */
1860 slot_id = TRB_TO_SLOT_ID(
1861 le32_to_cpu(cmd_trb->generic.field[3]));
1862 xhci_handle_cmd_reset_dev(xhci, slot_id);
1863 break;
1864 case TRB_NEC_GET_FW:
1865 xhci_handle_cmd_nec_get_fw(xhci, event);
1866 break;
1867 default:
1868 /* Skip over unknown commands on the event ring */
1869 xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
1870 break;
1871 }
1872
1873 /* restart timer if this wasn't the last command */
1874 if (!list_is_singular(&xhci->cmd_list)) {
1875 xhci->current_cmd = list_first_entry(&cmd->cmd_list,
1876 struct xhci_command, cmd_list);
1877 xhci_mod_cmd_timer(xhci);
1878 } else if (xhci->current_cmd == cmd) {
1879 xhci->current_cmd = NULL;
1880 }
1881
1882event_handled:
1883 xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1884
1885 inc_deq(xhci, xhci->cmd_ring);
1886}
1887
1888static void handle_vendor_event(struct xhci_hcd *xhci,
1889 union xhci_trb *event, u32 trb_type)
1890{
1891 xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1892 if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1893 handle_cmd_completion(xhci, &event->event_cmd);
1894}
1895
1896static void handle_device_notification(struct xhci_hcd *xhci,
1897 union xhci_trb *event)
1898{
1899 u32 slot_id;
1900 struct usb_device *udev;
1901
1902 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1903 if (!xhci->devs[slot_id]) {
1904 xhci_warn(xhci, "Device Notification event for unused slot %u\n",
1905 slot_id);
1906 return;
1907 }
1908
1909 xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1910 slot_id);
1911 udev = xhci->devs[slot_id]->udev;
1912 if (udev && udev->parent)
1913 usb_wakeup_notification(udev->parent, udev->portnum);
1914}
1915
1916/*
1917 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
1918 * controller.
1919 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
1920 * if a connection to a USB 1 device is followed by another connection
1921 * to a USB 2 device.
1922 *
1923 * Reset the PHY after the USB device is disconnected if the device speed
1924 * is less than HCD_USB3.
1925 * Retry the reset sequence a maximum of 4 times, checking the PLL lock status.
1926 *
1927 */
1928static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1929{
1930 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1931 u32 pll_lock_check;
1932 u32 retry_count = 4;
1933
1934 do {
1935 /* Assert PHY reset */
1936 writel(0x6F, hcd->regs + 0x1048);
1937 udelay(10);
1938 /* De-assert the PHY reset */
1939 writel(0x7F, hcd->regs + 0x1048);
1940 udelay(200);
1941 pll_lock_check = readl(hcd->regs + 0x1070);
1942 } while (!(pll_lock_check & 0x1) && --retry_count);
1943}
1944
1945static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
1946{
1947 struct usb_hcd *hcd;
1948 u32 port_id;
1949 u32 portsc, cmd_reg;
1950 int max_ports;
1951 unsigned int hcd_portnum;
1952 struct xhci_bus_state *bus_state;
1953 bool bogus_port_status = false;
1954 struct xhci_port *port;
1955
1956 /* Port status change events always have a successful completion code */
1957 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
1958 xhci_warn(xhci,
1959 "WARN: xHC returned failed port status event\n");
1960
1961 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1962 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1963
1964 if ((port_id <= 0) || (port_id > max_ports)) {
1965 xhci_warn(xhci, "Port change event with invalid port ID %d\n",
1966 port_id);
1967 return;
1968 }
1969
1970 port = &xhci->hw_ports[port_id - 1];
1971 if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
1972 xhci_warn(xhci, "Port change event, no port for port ID %u\n",
1973 port_id);
1974 bogus_port_status = true;
1975 goto cleanup;
1976 }
1977
1978 /* We might get interrupts after shared_hcd is removed */
1979 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
1980 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
1981 bogus_port_status = true;
1982 goto cleanup;
1983 }
1984
1985 hcd = port->rhub->hcd;
1986 bus_state = &port->rhub->bus_state;
1987 hcd_portnum = port->hcd_portnum;
1988 portsc = readl(port->addr);
1989
1990 xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
1991 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
1992
1993 trace_xhci_handle_port_status(port, portsc);
1994
1995 if (hcd->state == HC_STATE_SUSPENDED) {
1996 xhci_dbg(xhci, "resume root hub\n");
1997 usb_hcd_resume_root_hub(hcd);
1998 }
1999
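/*
 * A USB3 link reported as inactive indicates a link error; flag the
 * device so endpoint resets are avoided until it recovers (see
 * xhci_handle_halted_endpoint()).
 */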
2000 if (hcd->speed >= HCD_USB3 &&
2001 (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
2002 if (port->slot_id && xhci->devs[port->slot_id])
2003 xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
2004 }
2005
2006 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
2007 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
2008
2009 cmd_reg = readl(&xhci->op_regs->command);
2010 if (!(cmd_reg & CMD_RUN)) {
2011 xhci_warn(xhci, "xHC is not running.\n");
2012 goto cleanup;
2013 }
2014
2015 if (DEV_SUPERSPEED_ANY(portsc)) {
2016 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
2017 /* Set a flag to say the port signaled remote wakeup,
2018 * so we can tell the difference between the end of
2019 * device and host initiated resume.
2020 */
2021 bus_state->port_remote_wakeup |= 1 << hcd_portnum;
2022 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2023 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
2024 xhci_set_link_state(xhci, port, XDEV_U0);
2025 /* Need to wait until the next link state change
2026 * indicates the device is actually in U0.
2027 */
2028 bogus_port_status = true;
2029 goto cleanup;
2030 } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
2031 xhci_dbg(xhci, "resume HS port %d\n", port_id);
2032 port->resume_timestamp = jiffies +
2033 msecs_to_jiffies(USB_RESUME_TIMEOUT);
2034 set_bit(hcd_portnum, &bus_state->resuming_ports);
2035 /* Do the rest in GetPortStatus after the resume time delay.
2036 * Avoid polling roothub status before that so that the
2037 * usb device auto-resume latency stays around ~40ms.
2038 */
2039 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2040 mod_timer(&hcd->rh_timer,
2041 port->resume_timestamp);
2042 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
2043 bogus_port_status = true;
2044 }
2045 }
2046
2047 if ((portsc & PORT_PLC) &&
2048 DEV_SUPERSPEED_ANY(portsc) &&
2049 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
2050 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
2051 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
2052 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
2053 complete(&port->u3exit_done);
2054 /* We've just brought the device into U0/1/2 through either the
2055 * Resume state after a device remote wakeup, or through the
2056 * U3Exit state after a host-initiated resume. If it's a device
2057 * initiated remote wake, don't pass up the link state change,
2058 * so the roothub behavior is consistent with external
2059 * USB 3.0 hub behavior.
2060 */
2061 if (port->slot_id && xhci->devs[port->slot_id])
2062 xhci_ring_device(xhci, port->slot_id);
2063 if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
2064 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2065 usb_wakeup_notification(hcd->self.root_hub,
2066 hcd_portnum + 1);
2067 bogus_port_status = true;
2068 goto cleanup;
2069 }
2070 }
2071
2072 /*
2073 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
2074 * RExit to a disconnect state). If so, let the driver know it's
2075 * out of the RExit state.
2076 */
2077 if (hcd->speed < HCD_USB3 && port->rexit_active) {
2078 complete(&port->rexit_done);
2079 port->rexit_active = false;
2080 bogus_port_status = true;
2081 goto cleanup;
2082 }
2083
2084 if (hcd->speed < HCD_USB3) {
2085 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
2086 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
2087 (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
2088 xhci_cavium_reset_phy_quirk(xhci);
2089 }
2090
2091cleanup:
2092
2093 /* Don't make the USB core poll the roothub if we got a bad port status
2094 * change event. Besides, at that point we can't tell which roothub
2095 * (USB 2.0 or USB 3.0) to kick.
2096 */
2097 if (bogus_port_status)
2098 return;
2099
2100 /*
2101 * xHCI port-status-change events occur when the "or" of all the
2102 * status-change bits in the portsc register changes from 0 to 1.
2103 * New status changes won't cause an event if any other change
2104 * bits are still set. When an event occurs, switch over to
2105 * polling to avoid losing status changes.
2106 */
2107 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
2108 __func__, hcd->self.busnum);
2109 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2110 spin_unlock(&xhci->lock);
2111 /* Pass this up to the core */
2112 usb_hcd_poll_rh_status(hcd);
2113 spin_lock(&xhci->lock);
2114}
2115
2116/*
2117 * If the suspect DMA address is a TRB in this TD, this function returns that
2118 * TRB's segment. Otherwise it returns NULL.
2119 */
2120struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma,
2121 bool debug)
2122{
2123 dma_addr_t start_dma;
2124 dma_addr_t end_seg_dma;
2125 dma_addr_t end_trb_dma;
2126 struct xhci_segment *cur_seg;
2127
2128 start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
2129 cur_seg = td->start_seg;
2130
2131 do {
2132 if (start_dma == 0)
2133 return NULL;
2134 /* We may get an event for a Link TRB in the middle of a TD */
2135 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2136 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
2137 /* If the end TRB isn't in this segment, this is set to 0 */
2138 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
2139
2140 if (debug)
2141 xhci_warn(xhci,
2142 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
2143 (unsigned long long)suspect_dma,
2144 (unsigned long long)start_dma,
2145 (unsigned long long)end_trb_dma,
2146 (unsigned long long)cur_seg->dma,
2147 (unsigned long long)end_seg_dma);
2148
2149 if (end_trb_dma > 0) {
2150 /* The end TRB is in this segment, so suspect should be here */
2151 if (start_dma <= end_trb_dma) {
2152 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
2153 return cur_seg;
2154 } else {
2155 /* Case for one segment with
2156 * a TD wrapped around to the top
2157 */
2158 if ((suspect_dma >= start_dma &&
2159 suspect_dma <= end_seg_dma) ||
2160 (suspect_dma >= cur_seg->dma &&
2161 suspect_dma <= end_trb_dma))
2162 return cur_seg;
2163 }
2164 return NULL;
2165 } else {
2166 /* Might still be somewhere in this segment */
2167 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
2168 return cur_seg;
2169 }
2170 cur_seg = cur_seg->next;
2171 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2172 } while (cur_seg != td->start_seg);
2173
2174 return NULL;
2175}
2176
2177static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
2178 struct xhci_virt_ep *ep)
2179{
2180 /*
2181 * As part of low/full-speed endpoint-halt processing
2182 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
2183 */
2184 if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
2185 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
2186 !(ep->ep_state & EP_CLEARING_TT)) {
2187 ep->ep_state |= EP_CLEARING_TT;
2188 td->urb->ep->hcpriv = td->urb->dev;
2189 if (usb_hub_clear_tt_buffer(td->urb))
2190 ep->ep_state &= ~EP_CLEARING_TT;
2191 }
2192}
2193
2194/*
2195 * Check if xhci internal endpoint state has gone to a "halt" state due to an
2196 * error or stall, including default control pipe protocol stall.
2197 * The internal halt needs to be cleared with a reset endpoint command.
2198 *
2199 * External device side is also halted in functional stall cases. Class driver
2200 * will clear the device halt with a CLEAR_FEATURE(ENDPOINT_HALT) request later.
2201 */
2202static bool xhci_halted_host_endpoint(struct xhci_ep_ctx *ep_ctx, unsigned int comp_code)
2203{
2204 /* Stall halts both internal and device side endpoint */
2205 if (comp_code == COMP_STALL_ERROR)
2206 return true;
2207
2208 /* TRB completion codes that may require internal halt cleanup */
2209 if (comp_code == COMP_USB_TRANSACTION_ERROR ||
2210 comp_code == COMP_BABBLE_DETECTED_ERROR ||
2211 comp_code == COMP_SPLIT_TRANSACTION_ERROR)
2212 /*
2213 * The 0.95 spec says a babbling control endpoint is not halted.
2214 * The 0.96 spec says it is. Some HW claims to be 0.95
2215 * compliant, but it halts the control endpoint anyway.
2216 * Check endpoint context if endpoint is halted.
2217 */
2218 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
2219 return true;
2220
2221 return false;
2222}
2223
2224int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
2225{
2226 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
2227 /* Vendor defined "informational" completion code,
2228 * treat as not-an-error.
2229 */
2230 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
2231 trb_comp_code);
2232 xhci_dbg(xhci, "Treating code as success.\n");
2233 return 1;
2234 }
2235 return 0;
2236}
2237
2238static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2239 struct xhci_ring *ep_ring, struct xhci_td *td,
2240 u32 trb_comp_code)
2241{
2242 struct xhci_ep_ctx *ep_ctx;
2243
2244 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2245
2246 switch (trb_comp_code) {
2247 case COMP_STOPPED_LENGTH_INVALID:
2248 case COMP_STOPPED_SHORT_PACKET:
2249 case COMP_STOPPED:
2250 /*
2251 * The "Stop Endpoint" completion will take care of any
2252 * stopped TDs. A stopped TD may be restarted, so don't update
2253 * the ring dequeue pointer or take this TD off any lists yet.
2254 */
2255 return;
2256 case COMP_USB_TRANSACTION_ERROR:
2257 case COMP_BABBLE_DETECTED_ERROR:
2258 case COMP_SPLIT_TRANSACTION_ERROR:
2259 /*
2260 * If endpoint context state is not halted we might be
2261 * racing with a reset endpoint command issued by an unsuccessful
2262 * stop endpoint completion (context error). In that case the
2263 * td should be on the cancelled list, and EP_HALTED flag set.
2264 *
2265 * Or then it's not halted due to the 0.95 spec stating that a
2266 * babbling control endpoint should not halt. The 0.96 spec
2267 * again says it should. Some HW claims to be 0.95 compliant,
2268 * but it halts the control endpoint anyway.
2269 */
2270 if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
2271 /*
2272 * If EP_HALTED is set and TD is on the cancelled list
2273 * the TD and dequeue pointer will be handled by reset
2274 * ep command completion
2275 */
2276 if ((ep->ep_state & EP_HALTED) &&
2277 !list_empty(&td->cancelled_td_list)) {
2278 xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
2279 (unsigned long long)xhci_trb_virt_to_dma(
2280 td->start_seg, td->start_trb));
2281 return;
2282 }
2283 /* endpoint not halted, don't reset it */
2284 break;
2285 }
2286 /* Almost same procedure as for STALL_ERROR below */
2287 xhci_clear_hub_tt_buffer(xhci, td, ep);
2288 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2289 return;
2290 case COMP_STALL_ERROR:
2291 /*
2292 * xhci internal endpoint state will go to a "halt" state for
2293 * any stall, including default control pipe protocol stall.
2294 * To clear the host side halt we need to issue a reset endpoint
2295 * command, followed by a set dequeue command to move past the
2296 * TD.
2297 * Class drivers clear the device side halt from a functional
2298 * stall later. Hub TT buffer should only be cleared for FS/LS
2299 * devices behind HS hubs for functional stalls.
2300 */
2301 if (ep->ep_index != 0)
2302 xhci_clear_hub_tt_buffer(xhci, td, ep);
2303
2304 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2305
2306 return; /* xhci_handle_halted_endpoint marked td cancelled */
2307 default:
2308 break;
2309 }
2310
2311 xhci_dequeue_td(xhci, td, ep_ring, td->status);
2312}
2313
2314/* sum trb lengths from the first trb up to stop_trb, _excluding_ stop_trb */
2315static u32 sum_trb_lengths(struct xhci_td *td, union xhci_trb *stop_trb)
2316{
2317 u32 sum;
2318 union xhci_trb *trb = td->start_trb;
2319 struct xhci_segment *seg = td->start_seg;
2320
2321 for (sum = 0; trb != stop_trb; next_trb(&seg, &trb)) {
2322 if (!trb_is_noop(trb) && !trb_is_link(trb))
2323 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
2324 }
2325 return sum;
2326}
2327
2328/*
2329 * Process control tds, update urb status and actual_length.
2330 */
2331static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2332 struct xhci_ring *ep_ring, struct xhci_td *td,
2333 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2334{
2335 struct xhci_ep_ctx *ep_ctx;
2336 u32 trb_comp_code;
2337 u32 remaining, requested;
2338 u32 trb_type;
2339
2340 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
2341 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2342 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2343 requested = td->urb->transfer_buffer_length;
2344 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2345
2346 switch (trb_comp_code) {
2347 case COMP_SUCCESS:
2348 if (trb_type != TRB_STATUS) {
2349 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
2350 (trb_type == TRB_DATA) ? "data" : "setup");
2351 td->status = -ESHUTDOWN;
2352 break;
2353 }
2354 td->status = 0;
2355 break;
2356 case COMP_SHORT_PACKET:
2357 td->status = 0;
2358 break;
2359 case COMP_STOPPED_SHORT_PACKET:
2360 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2361 td->urb->actual_length = remaining;
2362 else
2363 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
2364 goto finish_td;
2365 case COMP_STOPPED:
2366 switch (trb_type) {
2367 case TRB_SETUP:
2368 td->urb->actual_length = 0;
2369 goto finish_td;
2370 case TRB_DATA:
2371 case TRB_NORMAL:
2372 td->urb->actual_length = requested - remaining;
2373 goto finish_td;
2374 case TRB_STATUS:
2375 td->urb->actual_length = requested;
2376 goto finish_td;
2377 default:
2378 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
2379 trb_type);
2380 goto finish_td;
2381 }
2382 case COMP_STOPPED_LENGTH_INVALID:
2383 goto finish_td;
2384 default:
2385 if (!xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
2386 break;
2387 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
2388 trb_comp_code, ep->ep_index);
2389 fallthrough;
2390 case COMP_STALL_ERROR:
2391 /* Did we transfer part of the data (middle) phase? */
2392 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2393 td->urb->actual_length = requested - remaining;
2394 else if (!td->urb_length_set)
2395 td->urb->actual_length = 0;
2396 goto finish_td;
2397 }
2398
2399 /* stopped at setup stage, no data transferred */
2400 if (trb_type == TRB_SETUP)
2401 goto finish_td;
2402
	/*
	 * If we are at the data stage, update the actual_length of the URB
	 * and flag it as set, so it won't be overwritten by the event for
	 * the last TRB.
	 */
2407 if (trb_type == TRB_DATA ||
2408 trb_type == TRB_NORMAL) {
2409 td->urb_length_set = true;
2410 td->urb->actual_length = requested - remaining;
2411 xhci_dbg(xhci, "Waiting for status stage event\n");
2412 return;
2413 }
2414
2415 /* at status stage */
2416 if (!td->urb_length_set)
2417 td->urb->actual_length = requested;
2418
2419finish_td:
2420 finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2421}
2422
2423/*
2424 * Process isochronous tds, update urb packet status and actual_length.
2425 */
2426static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2427 struct xhci_ring *ep_ring, struct xhci_td *td,
2428 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2429{
2430 struct urb_priv *urb_priv;
2431 int idx;
2432 struct usb_iso_packet_descriptor *frame;
2433 u32 trb_comp_code;
2434 bool sum_trbs_for_length = false;
2435 u32 remaining, requested, ep_trb_len;
2436 int short_framestatus;
2437
2438 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2439 urb_priv = td->urb->hcpriv;
2440 idx = urb_priv->num_tds_done;
2441 frame = &td->urb->iso_frame_desc[idx];
2442 requested = frame->length;
2443 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2444 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2445 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2446 -EREMOTEIO : 0;
2447
2448 /* handle completion code */
2449 switch (trb_comp_code) {
2450 case COMP_SUCCESS:
2451 /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */
2452 if (td->error_mid_td)
2453 break;
2454 if (remaining) {
2455 frame->status = short_framestatus;
2456 sum_trbs_for_length = true;
2457 break;
2458 }
2459 frame->status = 0;
2460 break;
2461 case COMP_SHORT_PACKET:
2462 frame->status = short_framestatus;
2463 sum_trbs_for_length = true;
2464 break;
2465 case COMP_BANDWIDTH_OVERRUN_ERROR:
2466 frame->status = -ECOMM;
2467 break;
2468 case COMP_BABBLE_DETECTED_ERROR:
2469 sum_trbs_for_length = true;
2470 fallthrough;
2471 case COMP_ISOCH_BUFFER_OVERRUN:
2472 frame->status = -EOVERFLOW;
2473 if (ep_trb != td->end_trb)
2474 td->error_mid_td = true;
2475 break;
2476 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2477 case COMP_STALL_ERROR:
2478 frame->status = -EPROTO;
2479 break;
2480 case COMP_USB_TRANSACTION_ERROR:
2481 frame->status = -EPROTO;
2482 sum_trbs_for_length = true;
2483 if (ep_trb != td->end_trb)
2484 td->error_mid_td = true;
2485 break;
2486 case COMP_STOPPED:
2487 sum_trbs_for_length = true;
2488 break;
2489 case COMP_STOPPED_SHORT_PACKET:
2490 /* field normally containing residue now contains transferred */
2491 frame->status = short_framestatus;
2492 requested = remaining;
2493 break;
2494 case COMP_STOPPED_LENGTH_INVALID:
2495 /* exclude stopped trb with invalid length from length sum */
2496 sum_trbs_for_length = true;
2497 ep_trb_len = 0;
2498 remaining = 0;
2499 break;
2500 default:
2501 sum_trbs_for_length = true;
2502 frame->status = -1;
2503 break;
2504 }
2505
2506 if (td->urb_length_set)
2507 goto finish_td;
2508
2509 if (sum_trbs_for_length)
2510 frame->actual_length = sum_trb_lengths(td, ep_trb) +
2511 ep_trb_len - remaining;
2512 else
2513 frame->actual_length = requested;
2514
2515 td->urb->actual_length += frame->actual_length;
2516
2517finish_td:
2518 /* Don't give back TD yet if we encountered an error mid TD */
2519 if (td->error_mid_td && ep_trb != td->end_trb) {
2520 xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n");
2521 td->urb_length_set = true;
2522 return;
2523 }
2524 finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2525}
2526
2527static void skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2528 struct xhci_virt_ep *ep, int status)
2529{
2530 struct urb_priv *urb_priv;
2531 struct usb_iso_packet_descriptor *frame;
2532 int idx;
2533
2534 urb_priv = td->urb->hcpriv;
2535 idx = urb_priv->num_tds_done;
2536 frame = &td->urb->iso_frame_desc[idx];
2537
2538 /* The transfer is partly done. */
2539 frame->status = -EXDEV;
2540
2541 /* calc actual length */
2542 frame->actual_length = 0;
2543
2544 xhci_dequeue_td(xhci, td, ep->ring, status);
2545}
2546
2547/*
2548 * Process bulk and interrupt tds, update urb status and actual_length.
2549 */
2550static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2551 struct xhci_ring *ep_ring, struct xhci_td *td,
2552 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2553{
2554 struct xhci_slot_ctx *slot_ctx;
2555 u32 trb_comp_code;
2556 u32 remaining, requested, ep_trb_len;
2557
2558 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2559 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2560 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2561 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2562 requested = td->urb->transfer_buffer_length;
2563
2564 switch (trb_comp_code) {
2565 case COMP_SUCCESS:
2566 ep->err_count = 0;
2567 /* handle success with untransferred data as short packet */
2568 if (ep_trb != td->end_trb || remaining) {
2569 xhci_warn(xhci, "WARN Successful completion on short TX\n");
2570 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2571 td->urb->ep->desc.bEndpointAddress,
2572 requested, remaining);
2573 }
2574 td->status = 0;
2575 break;
2576 case COMP_SHORT_PACKET:
2577 td->status = 0;
2578 break;
2579 case COMP_STOPPED_SHORT_PACKET:
2580 td->urb->actual_length = remaining;
2581 goto finish_td;
2582 case COMP_STOPPED_LENGTH_INVALID:
2583 /* stopped on ep trb with invalid length, exclude it */
2584 td->urb->actual_length = sum_trb_lengths(td, ep_trb);
2585 goto finish_td;
2586 case COMP_USB_TRANSACTION_ERROR:
2587 if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
2588 (ep->err_count++ > MAX_SOFT_RETRY) ||
2589 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
2590 break;
2591
2592 td->status = 0;
2593
2594 xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
2595 return;
2596 default:
2597 /* do nothing */
2598 break;
2599 }
2600
2601 if (ep_trb == td->end_trb)
2602 td->urb->actual_length = requested - remaining;
2603 else
2604 td->urb->actual_length =
2605 sum_trb_lengths(td, ep_trb) +
2606 ep_trb_len - remaining;
2607finish_td:
2608 if (remaining > requested) {
2609 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2610 remaining);
2611 td->urb->actual_length = 0;
2612 }
2613
2614 finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2615}
2616
2617/* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */
2618static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2619 u32 trb_comp_code)
2620{
2621 switch (trb_comp_code) {
2622 case COMP_STALL_ERROR:
2623 case COMP_USB_TRANSACTION_ERROR:
2624 case COMP_INVALID_STREAM_TYPE_ERROR:
2625 case COMP_INVALID_STREAM_ID_ERROR:
2626 xhci_dbg(xhci, "Stream transaction error ep %u no id\n", ep->ep_index);
2627 if (ep->err_count++ > MAX_SOFT_RETRY)
2628 xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET);
2629 else
2630 xhci_handle_halted_endpoint(xhci, ep, NULL, EP_SOFT_RESET);
2631 break;
2632 case COMP_RING_UNDERRUN:
2633 case COMP_RING_OVERRUN:
2634 case COMP_STOPPED_LENGTH_INVALID:
2635 break;
2636 default:
2637 xhci_err(xhci, "Transfer event %u for unknown stream ring slot %u ep %u\n",
2638 trb_comp_code, ep->vdev->slot_id, ep->ep_index);
2639 return -ENODEV;
2640 }
2641 return 0;
2642}
2643
2644/*
2645 * If this function returns an error condition, it means it got a Transfer
2646 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2647 * At this point, the host controller is probably hosed and should be reset.
2648 */
2649static int handle_tx_event(struct xhci_hcd *xhci,
2650 struct xhci_interrupter *ir,
2651 struct xhci_transfer_event *event)
2652{
2653 struct xhci_virt_ep *ep;
2654 struct xhci_ring *ep_ring;
2655 unsigned int slot_id;
2656 int ep_index;
2657 struct xhci_td *td = NULL;
2658 dma_addr_t ep_trb_dma;
2659 struct xhci_segment *ep_seg;
2660 union xhci_trb *ep_trb;
2661 int status = -EINPROGRESS;
2662 struct xhci_ep_ctx *ep_ctx;
2663 u32 trb_comp_code;
2664
2665 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2666 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2667 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2668 ep_trb_dma = le64_to_cpu(event->buffer);
2669
2670 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
2671 if (!ep) {
2672 xhci_err(xhci, "ERROR Invalid Transfer event\n");
2673 goto err_out;
2674 }
2675
2676 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
2677 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2678
2679 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
2680 xhci_err(xhci,
2681 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
2682 slot_id, ep_index);
2683 goto err_out;
2684 }
2685
2686 if (!ep_ring)
2687 return handle_transferless_tx_event(xhci, ep, trb_comp_code);
2688
2689 /* Look for common error cases */
2690 switch (trb_comp_code) {
2691 /* Skip codes that require special handling depending on
2692 * transfer type
2693 */
2694 case COMP_SUCCESS:
2695 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2696 trb_comp_code = COMP_SHORT_PACKET;
2697 xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
2698 slot_id, ep_index, ep_ring->last_td_was_short);
2699 }
2700 break;
2701 case COMP_SHORT_PACKET:
2702 break;
2703 /* Completion codes for endpoint stopped state */
2704 case COMP_STOPPED:
2705 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2706 slot_id, ep_index);
2707 break;
2708 case COMP_STOPPED_LENGTH_INVALID:
2709 xhci_dbg(xhci,
2710 "Stopped on No-op or Link TRB for slot %u ep %u\n",
2711 slot_id, ep_index);
2712 break;
2713 case COMP_STOPPED_SHORT_PACKET:
2714 xhci_dbg(xhci,
2715 "Stopped with short packet transfer detected for slot %u ep %u\n",
2716 slot_id, ep_index);
2717 break;
2718 /* Completion codes for endpoint halted state */
2719 case COMP_STALL_ERROR:
2720 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2721 ep_index);
2722 status = -EPIPE;
2723 break;
2724 case COMP_SPLIT_TRANSACTION_ERROR:
2725 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
2726 slot_id, ep_index);
2727 status = -EPROTO;
2728 break;
2729 case COMP_USB_TRANSACTION_ERROR:
2730 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
2731 slot_id, ep_index);
2732 status = -EPROTO;
2733 break;
2734 case COMP_BABBLE_DETECTED_ERROR:
2735 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2736 slot_id, ep_index);
2737 status = -EOVERFLOW;
2738 break;
2739 /* Completion codes for endpoint error state */
2740 case COMP_TRB_ERROR:
2741 xhci_warn(xhci,
2742 "WARN: TRB error for slot %u ep %u on endpoint\n",
2743 slot_id, ep_index);
2744 status = -EILSEQ;
2745 break;
2746 /* completion codes not indicating endpoint state change */
2747 case COMP_DATA_BUFFER_ERROR:
2748 xhci_warn(xhci,
2749 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2750 slot_id, ep_index);
2751 status = -ENOSR;
2752 break;
2753 case COMP_BANDWIDTH_OVERRUN_ERROR:
2754 xhci_warn(xhci,
2755 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2756 slot_id, ep_index);
2757 break;
2758 case COMP_ISOCH_BUFFER_OVERRUN:
2759 xhci_warn(xhci,
2760 "WARN: buffer overrun event for slot %u ep %u on endpoint",
2761 slot_id, ep_index);
2762 break;
2763 case COMP_RING_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate a Ring
		 * Overrun Event for an IN Isoch endpoint or a Ring Underrun
		 * Event for an OUT Isoch endpoint.
		 */
2769 xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
2770 if (ep->skip)
2771 break;
2772 return 0;
2773 case COMP_RING_OVERRUN:
2774 xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
2775 if (ep->skip)
2776 break;
2777 return 0;
2778 case COMP_MISSED_SERVICE_ERROR:
		/*
		 * When a Missed Service Error is encountered, one or more
		 * isoc TDs may have been missed by the xHC.
		 * Set the skip flag of the endpoint, and complete the missed
		 * TDs as short transfers the next time the ring is processed.
		 */
2785 ep->skip = true;
2786 xhci_dbg(xhci,
2787 "Miss service interval error for slot %u ep %u, set skip flag\n",
2788 slot_id, ep_index);
2789 return 0;
2790 case COMP_NO_PING_RESPONSE_ERROR:
2791 ep->skip = true;
2792 xhci_dbg(xhci,
2793 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2794 slot_id, ep_index);
2795 return 0;
2796
2797 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2798 /* needs disable slot command to recover */
2799 xhci_warn(xhci,
2800 "WARN: detect an incompatible device for slot %u ep %u",
2801 slot_id, ep_index);
2802 status = -EPROTO;
2803 break;
2804 default:
2805 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2806 status = 0;
2807 break;
2808 }
2809 xhci_warn(xhci,
2810 "ERROR Unknown event condition %u for slot %u ep %u , HC probably busted\n",
2811 trb_comp_code, slot_id, ep_index);
2812 if (ep->skip)
2813 break;
2814 return 0;
2815 }
2816
	/*
	 * xhci 4.10.2 states isoc endpoints should continue
	 * processing the next TD if there was an error mid TD.
	 * So hosts like NEC don't generate an event for the last
	 * isoc TRB even if the IOC flag is set.
	 * xhci 4.9.1 states that if there are errors in multi-TRB
	 * TDs the xHC should generate an error for that TRB, and if it
	 * proceeds to the next TD it should generate an event for
	 * any TRB with the IOC flag on the way. Other hosts follow this.
	 *
	 * We wait for the final IOC event, but if we get an event
	 * anywhere outside this TD, just give it back already.
	 */
2830 td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list);
2831
2832 if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
2833 xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
2834 xhci_dequeue_td(xhci, td, ep_ring, td->status);
2835 }
2836
2837 if (list_empty(&ep_ring->td_list)) {
		/*
		 * Don't print warnings if the ring is empty due to a stopped endpoint generating
		 * an extra completion event when the device was suspended, or due to an event for
		 * the last TRB of a short TD we already got a short event for. The short TD is
		 * already removed from the TD list.
		 */
2844 if (trb_comp_code != COMP_STOPPED &&
2845 trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
2846 !ep_ring->last_td_was_short) {
2847 xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
2848 slot_id, ep_index);
2849 }
2850
2851 ep->skip = false;
2852 goto check_endpoint_halted;
2853 }
2854
2855 do {
2856 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2857 td_list);
2858
2859 /* Is this a TRB in the currently executing TD? */
2860 ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);
2861
2862 if (!ep_seg) {
2863
2864 if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2865 skip_isoc_td(xhci, td, ep, status);
2866 if (!list_empty(&ep_ring->td_list))
2867 continue;
2868
2869 xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
2870 slot_id, ep_index);
2871 ep->skip = false;
2872 td = NULL;
2873 goto check_endpoint_halted;
2874 }
2875
			/*
			 * Skip the Force Stopped Event. The 'ep_trb' of the FSE is not in the
			 * current TD pointed to by 'ep_ring->dequeue' because the hardware dequeue
			 * pointer is still at the previous TRB of the current TD. The previous TRB
			 * may be a Link TRB or the last TRB of the previous TD. The command
			 * completion handler will take care of the rest.
			 */
2883 if (trb_comp_code == COMP_STOPPED ||
2884 trb_comp_code == COMP_STOPPED_LENGTH_INVALID) {
2885 return 0;
2886 }
2887
2888 /*
2889 * Some hosts give a spurious success event after a short
2890 * transfer. Ignore it.
2891 */
2892 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2893 ep_ring->last_td_was_short) {
2894 ep_ring->last_td_was_short = false;
2895 return 0;
2896 }
2897
2898 /* HC is busted, give up! */
2899 xhci_err(xhci,
2900 "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
2901 ep_index, trb_comp_code);
2902 trb_in_td(xhci, td, ep_trb_dma, true);
2903
2904 return -ESHUTDOWN;
2905 }
2906
2907 if (ep->skip) {
2908 xhci_dbg(xhci,
2909 "Found td. Clear skip flag for slot %u ep %u.\n",
2910 slot_id, ep_index);
2911 ep->skip = false;
2912 }
2913
		/*
		 * If ep->skip is set, there are missed TDs on the endpoint
		 * ring that need to be taken care of.
		 * Process them as short transfers until we reach the TD
		 * pointed to by the event.
		 */
2920 } while (ep->skip);
2921
2922 if (trb_comp_code == COMP_SHORT_PACKET)
2923 ep_ring->last_td_was_short = true;
2924 else
2925 ep_ring->last_td_was_short = false;
2926
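	/*
	 * Translate the event's TRB DMA address into a pointer to the
	 * matching TRB inside the segment found above.
	 */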
2927 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
2928 trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
2929
2930 /*
2931 * No-op TRB could trigger interrupts in a case where a URB was killed
2932 * and a STALL_ERROR happens right after the endpoint ring stopped.
2933 * Reset the halted endpoint. Otherwise, the endpoint remains stalled
2934 * indefinitely.
2935 */
2936
2937 if (trb_is_noop(ep_trb))
2938 goto check_endpoint_halted;
2939
2940 td->status = status;
2941
2942 /* update the urb's actual_length and give back to the core */
2943 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2944 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
2945 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2946 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
2947 else
2948 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
2949 return 0;
2950
2951check_endpoint_halted:
2952 if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
2953 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2954
2955 return 0;
2956
2957err_out:
2958 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2959 (unsigned long long) xhci_trb_virt_to_dma(
2960 ir->event_ring->deq_seg,
2961 ir->event_ring->dequeue),
2962 lower_32_bits(le64_to_cpu(event->buffer)),
2963 upper_32_bits(le64_to_cpu(event->buffer)),
2964 le32_to_cpu(event->transfer_len),
2965 le32_to_cpu(event->flags));
2966 return -ENODEV;
2967}
2968
2969/*
2970 * This function handles one OS-owned event on the event ring. It may drop
2971 * xhci->lock between event processing (e.g. to pass up port status changes).
2972 */
2973static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
2974 union xhci_trb *event)
2975{
2976 u32 trb_type;
2977
2978 trace_xhci_handle_event(ir->event_ring, &event->generic,
2979 xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
2980 ir->event_ring->dequeue));
2981
2982 /*
2983 * Barrier between reading the TRB_CYCLE (valid) flag before, and any
2984 * speculative reads of the event's flags/data below.
2985 */
2986 rmb();
2987 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
2988 /* FIXME: Handle more event types. */
2989
2990 switch (trb_type) {
2991 case TRB_COMPLETION:
2992 handle_cmd_completion(xhci, &event->event_cmd);
2993 break;
2994 case TRB_PORT_STATUS:
2995 handle_port_status(xhci, event);
2996 break;
2997 case TRB_TRANSFER:
2998 handle_tx_event(xhci, ir, &event->trans_event);
2999 break;
3000 case TRB_DEV_NOTE:
3001 handle_device_notification(xhci, event);
3002 break;
3003 default:
3004 if (trb_type >= TRB_VENDOR_DEFINED_LOW)
3005 handle_vendor_event(xhci, event, trb_type);
3006 else
3007 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
3008 }
3009 /* Any of the above functions may drop and re-acquire the lock, so check
3010 * to make sure a watchdog timer didn't mark the host as non-responsive.
3011 */
3012 if (xhci->xhc_state & XHCI_STATE_DYING) {
3013 xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
3014 return -ENODEV;
3015 }
3016
3017 return 0;
3018}
3019
3020/*
3021 * Update Event Ring Dequeue Pointer:
3022 * - When all events have finished
3023 * - To avoid "Event Ring Full Error" condition
3024 */
3025static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
3026 struct xhci_interrupter *ir,
3027 bool clear_ehb)
3028{
3029 u64 temp_64;
3030 dma_addr_t deq;
3031
3032 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
3033 deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
3034 ir->event_ring->dequeue);
3035 if (deq == 0)
3036 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
3037 /*
3038 * Per 4.9.4, Software writes to the ERDP register shall always advance
3039 * the Event Ring Dequeue Pointer value.
3040 */
3041 if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
3042 return;
3043
3044 /* Update HC event ring dequeue pointer */
3045 temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
3046 temp_64 |= deq & ERST_PTR_MASK;
3047
3048 /* Clear the event handler busy flag (RW1C) */
3049 if (clear_ehb)
3050 temp_64 |= ERST_EHB;
3051 xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
3052}
3053
3054/* Clear the interrupt pending bit for a specific interrupter. */
3055static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
3056{
3057 if (!ir->ip_autoclear) {
3058 u32 irq_pending;
3059
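		/*
		 * IMAN.IP is RW1C: writing the read value back with IP set
		 * clears the pending bit while leaving IE untouched.
		 */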
3060 irq_pending = readl(&ir->ir_set->irq_pending);
3061 irq_pending |= IMAN_IP;
3062 writel(irq_pending, &ir->ir_set->irq_pending);
3063 }
3064}
3065
3066/*
3067 * Handle all OS-owned events on an interrupter event ring. It may drop
 * and reacquire xhci->lock between event processing.
3069 */
3070static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
3071{
3072 int event_loop = 0;
3073 int err;
3074 u64 temp;
3075
3076 xhci_clear_interrupt_pending(ir);
3077
3078 /* Event ring hasn't been allocated yet. */
3079 if (!ir->event_ring || !ir->event_ring->dequeue) {
3080 xhci_err(xhci, "ERROR interrupter event ring not ready\n");
3081 return -ENOMEM;
3082 }
3083
3084 if (xhci->xhc_state & XHCI_STATE_DYING ||
3085 xhci->xhc_state & XHCI_STATE_HALTED) {
3086 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
3087
3088 /* Clear the event handler busy flag (RW1C) */
3089 temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
3090 xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
3091 return -ENODEV;
3092 }
3093
3094 /* Process all OS owned event TRBs on this event ring */
3095 while (unhandled_event_trb(ir->event_ring)) {
3096 err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
3097
		/*
		 * If half a segment of events has been handled in one go then
		 * update ERDP, and force isoc TRBs to interrupt more often.
		 */
3102 if (event_loop++ > TRBS_PER_SEGMENT / 2) {
3103 xhci_update_erst_dequeue(xhci, ir, false);
3104
3105 if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
3106 ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
3107
3108 event_loop = 0;
3109 }
3110
3111 /* Update SW event ring dequeue pointer */
3112 inc_deq(xhci, ir->event_ring);
3113
3114 if (err)
3115 break;
3116 }
3117
3118 xhci_update_erst_dequeue(xhci, ir, true);
3119
3120 return 0;
3121}
3122
3123/*
3124 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3125 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
3126 * indicators of an event TRB error, but we check the status *first* to be safe.
3127 */
3128irqreturn_t xhci_irq(struct usb_hcd *hcd)
3129{
3130 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3131 irqreturn_t ret = IRQ_HANDLED;
3132 u32 status;
3133
3134 spin_lock(&xhci->lock);
3135 /* Check if the xHC generated the interrupt, or the irq is shared */
3136 status = readl(&xhci->op_regs->status);
3137 if (status == ~(u32)0) {
3138 xhci_hc_died(xhci);
3139 goto out;
3140 }
3141
3142 if (!(status & STS_EINT)) {
3143 ret = IRQ_NONE;
3144 goto out;
3145 }
3146
3147 if (status & STS_HCE) {
3148 xhci_warn(xhci, "WARNING: Host Controller Error\n");
3149 goto out;
3150 }
3151
3152 if (status & STS_FATAL) {
3153 xhci_warn(xhci, "WARNING: Host System Error\n");
3154 xhci_halt(xhci);
3155 goto out;
3156 }
3157
3158 /*
3159 * Clear the op reg interrupt status first,
3160 * so we can receive interrupts from other MSI-X interrupters.
3161 * Write 1 to clear the interrupt status.
3162 */
3163 status |= STS_EINT;
3164 writel(status, &xhci->op_regs->status);
3165
3166 /* This is the handler of the primary interrupter */
3167 xhci_handle_events(xhci, xhci->interrupters[0]);
3168out:
3169 spin_unlock(&xhci->lock);
3170
3171 return ret;
3172}
3173
3174irqreturn_t xhci_msi_irq(int irq, void *hcd)
3175{
3176 return xhci_irq(hcd);
3177}
3178EXPORT_SYMBOL_GPL(xhci_msi_irq);
3179
3180/**** Endpoint Ring Operations ****/
3181
3182/*
3183 * Generic function for queueing a TRB on a ring.
3184 * The caller must have checked to make sure there's room on the ring.
3185 *
3186 * @more_trbs_coming: Will you enqueue more TRBs before calling
3187 * prepare_transfer()?
3188 */
3189static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3190 bool more_trbs_coming,
3191 u32 field1, u32 field2, u32 field3, u32 field4)
3192{
3193 struct xhci_generic_trb *trb;
3194
3195 trb = &ring->enqueue->generic;
3196 trb->field[0] = cpu_to_le32(field1);
3197 trb->field[1] = cpu_to_le32(field2);
3198 trb->field[2] = cpu_to_le32(field3);
3199 /* make sure TRB is fully written before giving it to the controller */
3200 wmb();
3201 trb->field[3] = cpu_to_le32(field4);
3202
3203 trace_xhci_queue_trb(ring, trb,
3204 xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue));
3205
3206 inc_enq(xhci, ring, more_trbs_coming);
3207}
3208
3209/*
3210 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * Expands the ring if it starts to become full.
3212 */
3213static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3214 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
3215{
3216 unsigned int link_trb_count = 0;
3217 unsigned int new_segs = 0;
3218
3219 /* Make sure the endpoint has been added to xHC schedule */
3220 switch (ep_state) {
3221 case EP_STATE_DISABLED:
3222 /*
3223 * USB core changed config/interfaces without notifying us,
3224 * or hardware is reporting the wrong state.
3225 */
3226 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
3227 return -ENOENT;
3228 case EP_STATE_ERROR:
3229 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
3230 /* FIXME event handling code for error needs to clear it */
3231 /* XXX not sure if this should be -ENOENT or not */
3232 return -EINVAL;
3233 case EP_STATE_HALTED:
3234 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
3235 break;
3236 case EP_STATE_STOPPED:
3237 case EP_STATE_RUNNING:
3238 break;
3239 default:
3240 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
3241 /*
3242 * FIXME issue Configure Endpoint command to try to get the HC
3243 * back into a known state.
3244 */
3245 return -EINVAL;
3246 }
3247
3248 if (ep_ring != xhci->cmd_ring) {
3249 new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
3250 } else if (xhci_num_trbs_free(ep_ring) <= num_trbs) {
3251 xhci_err(xhci, "Do not support expand command ring\n");
3252 return -ENOMEM;
3253 }
3254
3255 if (new_segs) {
3256 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
3257 "ERROR no room on ep ring, try ring expansion");
3258 if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) {
3259 xhci_err(xhci, "Ring expansion failed\n");
3260 return -ENOMEM;
3261 }
3262 }
3263
3264 while (trb_is_link(ep_ring->enqueue)) {
3265 /* If we're not dealing with 0.95 hardware or isoc rings
3266 * on AMD 0.96 host, clear the chain bit.
3267 */
3268 if (!xhci_link_chain_quirk(xhci, ep_ring->type))
3269 ep_ring->enqueue->link.control &=
3270 cpu_to_le32(~TRB_CHAIN);
3271 else
3272 ep_ring->enqueue->link.control |=
3273 cpu_to_le32(TRB_CHAIN);
3274
3275 wmb();
3276 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
3277
3278 /* Toggle the cycle bit after the last ring segment. */
3279 if (link_trb_toggles_cycle(ep_ring->enqueue))
3280 ep_ring->cycle_state ^= 1;
3281
3282 ep_ring->enq_seg = ep_ring->enq_seg->next;
3283 ep_ring->enqueue = ep_ring->enq_seg->trbs;
3284
3285 /* prevent infinite loop if all first trbs are link trbs */
3286 if (link_trb_count++ > ep_ring->num_segs) {
3287 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
3288 return -EINVAL;
3289 }
3290 }
3291
3292 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
3293 xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
3294 return -EINVAL;
3295 }
3296
3297 return 0;
3298}
3299
3300static int prepare_transfer(struct xhci_hcd *xhci,
3301 struct xhci_virt_device *xdev,
3302 unsigned int ep_index,
3303 unsigned int stream_id,
3304 unsigned int num_trbs,
3305 struct urb *urb,
3306 unsigned int td_index,
3307 gfp_t mem_flags)
3308{
3309 int ret;
3310 struct urb_priv *urb_priv;
3311 struct xhci_td *td;
3312 struct xhci_ring *ep_ring;
3313 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3314
3315 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3316 stream_id);
3317 if (!ep_ring) {
3318 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3319 stream_id);
3320 return -EINVAL;
3321 }
3322
3323 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3324 num_trbs, mem_flags);
3325 if (ret)
3326 return ret;
3327
3328 urb_priv = urb->hcpriv;
3329 td = &urb_priv->td[td_index];
3330
3331 INIT_LIST_HEAD(&td->td_list);
3332 INIT_LIST_HEAD(&td->cancelled_td_list);
3333
3334 if (td_index == 0) {
3335 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3336 if (unlikely(ret))
3337 return ret;
3338 }
3339
3340 td->urb = urb;
3341 /* Add this TD to the tail of the endpoint ring's TD list */
3342 list_add_tail(&td->td_list, &ep_ring->td_list);
3343 td->start_seg = ep_ring->enq_seg;
3344 td->start_trb = ep_ring->enqueue;
3345
3346 return 0;
3347}
3348
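/*
 * Number of TRBs needed to map @len bytes starting at @addr, given that a
 * single TRB buffer must not cross a 64KB (TRB_MAX_BUFF_SIZE) boundary.
 * A zero-length transfer still needs one TRB.
 *
 * Illustrative example: a 150000 byte buffer starting 12288 bytes into a
 * 64KB region needs DIV_ROUND_UP(150000 + 12288, 65536) = 3 TRBs.
 */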
3349unsigned int count_trbs(u64 addr, u64 len)
3350{
3351 unsigned int num_trbs;
3352
3353 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3354 TRB_MAX_BUFF_SIZE);
3355 if (num_trbs == 0)
3356 num_trbs++;
3357
3358 return num_trbs;
3359}
3360
3361static inline unsigned int count_trbs_needed(struct urb *urb)
3362{
3363 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3364}
3365
3366static unsigned int count_sg_trbs_needed(struct urb *urb)
3367{
3368 struct scatterlist *sg;
3369 unsigned int i, len, full_len, num_trbs = 0;
3370
3371 full_len = urb->transfer_buffer_length;
3372
3373 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3374 len = sg_dma_len(sg);
3375 num_trbs += count_trbs(sg_dma_address(sg), len);
3376 len = min_t(unsigned int, len, full_len);
3377 full_len -= len;
3378 if (full_len == 0)
3379 break;
3380 }
3381
3382 return num_trbs;
3383}
3384
3385static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3386{
3387 u64 addr, len;
3388
3389 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3390 len = urb->iso_frame_desc[i].length;
3391
3392 return count_trbs(addr, len);
3393}
3394
3395static void check_trb_math(struct urb *urb, int running_total)
3396{
3397 if (unlikely(running_total != urb->transfer_buffer_length))
3398 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3399 "queued %#x (%d), asked for %#x (%d)\n",
3400 __func__,
3401 urb->ep->desc.bEndpointAddress,
3402 running_total, running_total,
3403 urb->transfer_buffer_length,
3404 urb->transfer_buffer_length);
3405}
3406
3407static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3408 unsigned int ep_index, unsigned int stream_id, int start_cycle,
3409 struct xhci_generic_trb *start_trb)
3410{
3411 /*
3412 * Pass all the TRBs to the hardware at once and make sure this write
3413 * isn't reordered.
3414 */
3415 wmb();
3416 if (start_cycle)
3417 start_trb->field[3] |= cpu_to_le32(start_cycle);
3418 else
3419 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3420 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3421}
3422
3423static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx)
3424{
3425 int xhci_interval;
3426 int ep_interval;
3427
3428 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3429 ep_interval = urb->interval;
3430
3431 /* Convert to microframes */
3432 if (urb->dev->speed == USB_SPEED_LOW ||
3433 urb->dev->speed == USB_SPEED_FULL)
3434 ep_interval *= 8;
3435
3436 /* FIXME change this to a warning and a suggestion to use the new API
3437 * to set the polling interval (once the API is added).
3438 */
3439 if (xhci_interval != ep_interval) {
3440 dev_dbg_ratelimited(&urb->dev->dev,
3441 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3442 ep_interval, ep_interval == 1 ? "" : "s",
3443 xhci_interval, xhci_interval == 1 ? "" : "s");
3444 urb->interval = xhci_interval;
3445 /* Convert back to frames for LS/FS devices */
3446 if (urb->dev->speed == USB_SPEED_LOW ||
3447 urb->dev->speed == USB_SPEED_FULL)
3448 urb->interval /= 8;
3449 }
3450}
3451
3452/*
3453 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3454 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3455 * (comprised of sg list entries) can take several service intervals to
3456 * transmit.
3457 */
3458int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3459 struct urb *urb, int slot_id, unsigned int ep_index)
3460{
3461 struct xhci_ep_ctx *ep_ctx;
3462
3463 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3464 check_interval(urb, ep_ctx);
3465
3466 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3467}
3468
3469/*
3470 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3471 * packets remaining in the TD (*not* including this TRB).
3472 *
3473 * Total TD packet count = total_packet_count =
 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3475 *
3476 * Packets transferred up to and including this TRB = packets_transferred =
3477 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3478 *
3479 * TD size = total_packet_count - packets_transferred
3480 *
3481 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3482 * including this TRB, right shifted by 10
3483 *
3484 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3485 * This is taken care of in the TRB_TD_SIZE() macro
3486 *
3487 * The last TRB in a TD must have the TD size set to zero.
3488 */
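/*
 * Illustrative example of the 1.0+ TD size calculation: with
 * wMaxPacketSize = 1024 and a 6000 byte TD split into 1024 byte TRBs,
 * total_packet_count = DIV_ROUND_UP(6000, 1024) = 6. The first TRB has
 * transferred = 0 and trb_buff_len = 1024, so TD size = 6 - 1 = 5; the
 * second TRB gets 6 - 2 = 4, and so on. The final TRB reports 0 because
 * more_trbs_coming is false.
 */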
3489static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3490 int trb_buff_len, unsigned int td_total_len,
3491 struct urb *urb, bool more_trbs_coming)
3492{
3493 u32 maxp, total_packet_count;
3494
3495 /* MTK xHCI 0.96 contains some features from 1.0 */
3496 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3497 return ((td_total_len - transferred) >> 10);
3498
3499 /* One TRB with a zero-length data packet. */
3500 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3501 trb_buff_len == td_total_len)
3502 return 0;
3503
	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3505 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3506 trb_buff_len = 0;
3507
3508 maxp = usb_endpoint_maxp(&urb->ep->desc);
3509 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3510
3511 /* Queueing functions don't count the current TRB into transferred */
3512 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3513}
3514
3515
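/*
 * When a TD has to continue across a segment boundary (link TRB), the data
 * queued so far should end on a max packet boundary (xHCI TD fragment rules).
 * Try to split the last normal TRB on this segment at a packet boundary; if
 * the TRB is too short for that, copy the remaining bytes into the segment's
 * bounce buffer instead so the fragment still ends packet aligned.
 *
 * Returns 1 if the bounce buffer is used, 0 otherwise.
 */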
3516static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3517 u32 *trb_buff_len, struct xhci_segment *seg)
3518{
3519 struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
3520 unsigned int unalign;
3521 unsigned int max_pkt;
3522 u32 new_buff_len;
3523 size_t len;
3524
3525 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
3526 unalign = (enqd_len + *trb_buff_len) % max_pkt;
3527
3528 /* we got lucky, last normal TRB data on segment is packet aligned */
3529 if (unalign == 0)
3530 return 0;
3531
3532 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3533 unalign, *trb_buff_len);
3534
	/* is the last normal TRB alignable by splitting it? */
3536 if (*trb_buff_len > unalign) {
3537 *trb_buff_len -= unalign;
3538 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3539 return 0;
3540 }
3541
	/*
	 * We want enqd_len + trb_buff_len to sum up to a number which is
	 * divisible by the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
3547 new_buff_len = max_pkt - (enqd_len % max_pkt);
3548
3549 if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3550 new_buff_len = (urb->transfer_buffer_length - enqd_len);
3551
	/* use a max_pkt sized bounce buffer, pointed to by the last TRB */
3553 if (usb_urb_dir_out(urb)) {
3554 if (urb->num_sgs) {
3555 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
3556 seg->bounce_buf, new_buff_len, enqd_len);
3557 if (len != new_buff_len)
3558 xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
3559 len, new_buff_len);
3560 } else {
3561 memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
3562 }
3563
3564 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3565 max_pkt, DMA_TO_DEVICE);
3566 } else {
3567 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3568 max_pkt, DMA_FROM_DEVICE);
3569 }
3570
3571 if (dma_mapping_error(dev, seg->bounce_dma)) {
3572 /* try without aligning. Some host controllers survive */
3573 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3574 return 0;
3575 }
3576 *trb_buff_len = new_buff_len;
3577 seg->bounce_len = new_buff_len;
3578 seg->bounce_offs = enqd_len;
3579
3580 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3581
3582 return 1;
3583}
3584
3585/* This is very similar to what ehci-q.c qtd_fill() does */
3586int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3587 struct urb *urb, int slot_id, unsigned int ep_index)
3588{
3589 struct xhci_ring *ring;
3590 struct urb_priv *urb_priv;
3591 struct xhci_td *td;
3592 struct xhci_generic_trb *start_trb;
3593 struct scatterlist *sg = NULL;
3594 bool more_trbs_coming = true;
3595 bool need_zero_pkt = false;
3596 bool first_trb = true;
3597 unsigned int num_trbs;
3598 unsigned int start_cycle, num_sgs = 0;
3599 unsigned int enqd_len, block_len, trb_buff_len, full_len;
3600 int sent_len, ret;
3601 u32 field, length_field, remainder;
3602 u64 addr, send_addr;
3603
3604 ring = xhci_urb_to_transfer_ring(xhci, urb);
3605 if (!ring)
3606 return -EINVAL;
3607
3608 full_len = urb->transfer_buffer_length;
3609 /* If we have scatter/gather list, we use it. */
3610 if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
3611 num_sgs = urb->num_mapped_sgs;
3612 sg = urb->sg;
3613 addr = (u64) sg_dma_address(sg);
3614 block_len = sg_dma_len(sg);
3615 num_trbs = count_sg_trbs_needed(urb);
3616 } else {
3617 num_trbs = count_trbs_needed(urb);
3618 addr = (u64) urb->transfer_dma;
3619 block_len = full_len;
3620 }
3621 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3622 ep_index, urb->stream_id,
3623 num_trbs, urb, 0, mem_flags);
3624 if (unlikely(ret < 0))
3625 return ret;
3626
3627 urb_priv = urb->hcpriv;
3628
3629 /* Deal with URB_ZERO_PACKET - need one more td/trb */
3630 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
3631 need_zero_pkt = true;
3632
3633 td = &urb_priv->td[0];
3634
3635 /*
3636 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3637 * until we've finished creating all the other TRBs. The ring's cycle
3638 * state may change as we enqueue the other TRBs, so save it too.
3639 */
3640 start_trb = &ring->enqueue->generic;
3641 start_cycle = ring->cycle_state;
3642 send_addr = addr;
3643
3644 /* Queue the TRBs, even if they are zero-length */
3645 for (enqd_len = 0; first_trb || enqd_len < full_len;
3646 enqd_len += trb_buff_len) {
3647 field = TRB_TYPE(TRB_NORMAL);
3648
3649 /* TRB buffer should not cross 64KB boundaries */
3650 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3651 trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
3652
3653 if (enqd_len + trb_buff_len > full_len)
3654 trb_buff_len = full_len - enqd_len;
3655
3656 /* Don't change the cycle bit of the first TRB until later */
3657 if (first_trb) {
3658 first_trb = false;
3659 if (start_cycle == 0)
3660 field |= TRB_CYCLE;
3661 } else
3662 field |= ring->cycle_state;
3663
3664 /* Chain all the TRBs together; clear the chain bit in the last
3665 * TRB to indicate it's the last TRB in the chain.
3666 */
3667 if (enqd_len + trb_buff_len < full_len) {
3668 field |= TRB_CHAIN;
3669 if (trb_is_link(ring->enqueue + 1)) {
3670 if (xhci_align_td(xhci, urb, enqd_len,
3671 &trb_buff_len,
3672 ring->enq_seg)) {
3673 send_addr = ring->enq_seg->bounce_dma;
3674 /* assuming TD won't span 2 segs */
3675 td->bounce_seg = ring->enq_seg;
3676 }
3677 }
3678 }
3679 if (enqd_len + trb_buff_len >= full_len) {
3680 field &= ~TRB_CHAIN;
3681 field |= TRB_IOC;
3682 more_trbs_coming = false;
3683 td->end_trb = ring->enqueue;
3684 td->end_seg = ring->enq_seg;
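			/*
			 * Small OUT transfers that fit in the 64-bit buffer
			 * pointer field can be carried in the TRB itself
			 * (Immediate Data Transfer) instead of being fetched
			 * via DMA.
			 */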
3685 if (xhci_urb_suitable_for_idt(urb)) {
3686 memcpy(&send_addr, urb->transfer_buffer,
3687 trb_buff_len);
3688 le64_to_cpus(&send_addr);
3689 field |= TRB_IDT;
3690 }
3691 }
3692
3693 /* Only set interrupt on short packet for IN endpoints */
3694 if (usb_urb_dir_in(urb))
3695 field |= TRB_ISP;
3696
3697 /* Set the TRB length, TD size, and interrupter fields. */
3698 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3699 full_len, urb, more_trbs_coming);
3700
3701 length_field = TRB_LEN(trb_buff_len) |
3702 TRB_TD_SIZE(remainder) |
3703 TRB_INTR_TARGET(0);
3704
3705 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
3706 lower_32_bits(send_addr),
3707 upper_32_bits(send_addr),
3708 length_field,
3709 field);
3710 addr += trb_buff_len;
3711 sent_len = trb_buff_len;
3712
3713 while (sg && sent_len >= block_len) {
3714 /* New sg entry */
3715 --num_sgs;
3716 sent_len -= block_len;
3717 sg = sg_next(sg);
3718 if (num_sgs != 0 && sg) {
3719 block_len = sg_dma_len(sg);
3720 addr = (u64) sg_dma_address(sg);
3721 addr += sent_len;
3722 }
3723 }
3724 block_len -= sent_len;
3725 send_addr = addr;
3726 }
3727
3728 if (need_zero_pkt) {
3729 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3730 ep_index, urb->stream_id,
3731 1, urb, 1, mem_flags);
3732 urb_priv->td[1].end_trb = ring->enqueue;
3733 urb_priv->td[1].end_seg = ring->enq_seg;
3734 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3735 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3736 }
3737
3738 check_trb_math(urb, enqd_len);
3739 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3740 start_cycle, start_trb);
3741 return 0;
3742}
3743
3744/* Caller must have locked xhci->lock */
3745int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3746 struct urb *urb, int slot_id, unsigned int ep_index)
3747{
3748 struct xhci_ring *ep_ring;
3749 int num_trbs;
3750 int ret;
3751 struct usb_ctrlrequest *setup;
3752 struct xhci_generic_trb *start_trb;
3753 int start_cycle;
3754 u32 field;
3755 struct urb_priv *urb_priv;
3756 struct xhci_td *td;
3757
3758 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3759 if (!ep_ring)
3760 return -EINVAL;
3761
3762 /*
3763 * Need to copy setup packet into setup TRB, so we can't use the setup
3764 * DMA address.
3765 */
3766 if (!urb->setup_packet)
3767 return -EINVAL;
3768
3769 if ((xhci->quirks & XHCI_ETRON_HOST) &&
3770 urb->dev->speed >= USB_SPEED_SUPER) {
		/*
		 * If the next available TRB is the Link TRB in the ring segment
		 * then enqueue a No Op TRB; this prevents the Setup and Data
		 * Stage TRBs from being split by the Link TRB.
		 */
3776 if (trb_is_link(ep_ring->enqueue + 1)) {
3777 field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state;
3778 queue_trb(xhci, ep_ring, false, 0, 0,
3779 TRB_INTR_TARGET(0), field);
3780 }
3781 }
3782
3783 /* 1 TRB for setup, 1 for status */
3784 num_trbs = 2;
	/*
	 * No need to check whether we need additional event data and normal
	 * TRBs, since data in control transfers will never get bigger than 16MB.
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
3790 if (urb->transfer_buffer_length > 0)
3791 num_trbs++;
3792 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3793 ep_index, urb->stream_id,
3794 num_trbs, urb, 0, mem_flags);
3795 if (ret < 0)
3796 return ret;
3797
3798 urb_priv = urb->hcpriv;
3799 td = &urb_priv->td[0];
3800
3801 /*
3802 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3803 * until we've finished creating all the other TRBs. The ring's cycle
3804 * state may change as we enqueue the other TRBs, so save it too.
3805 */
3806 start_trb = &ep_ring->enqueue->generic;
3807 start_cycle = ep_ring->cycle_state;
3808
3809 /* Queue setup TRB - see section 6.4.1.2.1 */
3810 /* FIXME better way to translate setup_packet into two u32 fields? */
3811 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3812 field = 0;
3813 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3814 if (start_cycle == 0)
3815 field |= 0x1;
3816
3817 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
3818 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3819 if (urb->transfer_buffer_length > 0) {
3820 if (setup->bRequestType & USB_DIR_IN)
3821 field |= TRB_TX_TYPE(TRB_DATA_IN);
3822 else
3823 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3824 }
3825 }
3826
3827 queue_trb(xhci, ep_ring, true,
3828 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3829 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3830 TRB_LEN(8) | TRB_INTR_TARGET(0),
3831 /* Immediate data in pointer */
3832 field);
3833
3834 /* If there's data, queue data TRBs */
3835 /* Only set interrupt on short packet for IN endpoints */
3836 if (usb_urb_dir_in(urb))
3837 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3838 else
3839 field = TRB_TYPE(TRB_DATA);
3840
3841 if (urb->transfer_buffer_length > 0) {
3842 u32 length_field, remainder;
3843 u64 addr;
3844
3845 if (xhci_urb_suitable_for_idt(urb)) {
3846 memcpy(&addr, urb->transfer_buffer,
3847 urb->transfer_buffer_length);
3848 le64_to_cpus(&addr);
3849 field |= TRB_IDT;
3850 } else {
3851 addr = (u64) urb->transfer_dma;
3852 }
3853
3854 remainder = xhci_td_remainder(xhci, 0,
3855 urb->transfer_buffer_length,
3856 urb->transfer_buffer_length,
3857 urb, 1);
3858 length_field = TRB_LEN(urb->transfer_buffer_length) |
3859 TRB_TD_SIZE(remainder) |
3860 TRB_INTR_TARGET(0);
3861 if (setup->bRequestType & USB_DIR_IN)
3862 field |= TRB_DIR_IN;
3863 queue_trb(xhci, ep_ring, true,
3864 lower_32_bits(addr),
3865 upper_32_bits(addr),
3866 length_field,
3867 field | ep_ring->cycle_state);
3868 }
3869
3870 /* Save the DMA address of the last TRB in the TD */
3871 td->end_trb = ep_ring->enqueue;
3872 td->end_seg = ep_ring->enq_seg;
3873
3874 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3875 /* If the device sent data, the status stage is an OUT transfer */
3876 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3877 field = 0;
3878 else
3879 field = TRB_DIR_IN;
3880 queue_trb(xhci, ep_ring, false,
3881 0,
3882 0,
3883 TRB_INTR_TARGET(0),
3884 /* Event on completion */
3885 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3886
3887 giveback_first_trb(xhci, slot_id, ep_index, 0,
3888 start_cycle, start_trb);
3889 return 0;
3890}
3891
3892/*
3893 * The transfer burst count field of the isochronous TRB defines the number of
3894 * bursts that are required to move all packets in this TD. Only SuperSpeed
3895 * devices can burst up to bMaxBurst number of packets per service interval.
3896 * This field is zero based, meaning a value of zero in the field means one
3897 * burst. Basically, for everything but SuperSpeed devices, this field will be
3898 * zero. Only xHCI 1.0 host controllers support this field.
3899 */
3900static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3901 struct urb *urb, unsigned int total_packet_count)
3902{
3903 unsigned int max_burst;
3904
3905 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3906 return 0;
3907
3908 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3909 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3910}
3911
3912/*
3913 * Returns the number of packets in the last "burst" of packets. This field is
3914 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3915 * the last burst packet count is equal to the total number of packets in the
3916 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3917 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3918 * contain 1 to (bMaxBurst + 1) packets.
3919 */
3920static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3921 struct urb *urb, unsigned int total_packet_count)
3922{
3923 unsigned int max_burst;
3924 unsigned int residue;
3925
3926 if (xhci->hci_version < 0x100)
3927 return 0;
3928
3929 if (urb->dev->speed >= USB_SPEED_SUPER) {
3930 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3931 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3932 residue = total_packet_count % (max_burst + 1);
3933 /* If residue is zero, the last burst contains (max_burst + 1)
3934 * number of packets, but the TLBPC field is zero-based.
3935 */
3936 if (residue == 0)
3937 return max_burst;
3938 return residue - 1;
3939 }
3940 if (total_packet_count == 0)
3941 return 0;
3942 return total_packet_count - 1;
3943}
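/*
 * Illustrative example for the two burst helpers above: a SuperSpeed isoc
 * endpoint with bMaxBurst = 3 (4 packets per burst) moving a 10 packet TD
 * gets TBC = DIV_ROUND_UP(10, 4) - 1 = 2 (i.e. 3 bursts, zero based) and
 * TLBPC = (10 % 4) - 1 = 1 (i.e. 2 packets in the last burst, zero based).
 */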
3944
/*
 * Calculates the Frame ID field of the isochronous TRB, which identifies the
 * target frame that the Interval associated with this Isochronous
 * Transfer Descriptor will start on. Refer to 4.11.2.5 in the 1.1 spec.
 *
 * Returns the actual frame id on success, negative value on error.
 */
3951 */
3952static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3953 struct urb *urb, int index)
3954{
3955 int start_frame, ist, ret = 0;
3956 int start_frame_id, end_frame_id, current_frame_id;
3957
3958 if (urb->dev->speed == USB_SPEED_LOW ||
3959 urb->dev->speed == USB_SPEED_FULL)
3960 start_frame = urb->start_frame + index * urb->interval;
3961 else
3962 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3963
3964 /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3965 *
3966 * If bit [3] of IST is cleared to '0', software can add a TRB no
3967 * later than IST[2:0] Microframes before that TRB is scheduled to
3968 * be executed.
3969 * If bit [3] of IST is set to '1', software can add a TRB no later
3970 * than IST[2:0] Frames before that TRB is scheduled to be executed.
3971 */
3972 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3973 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3974 ist <<= 3;
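	/*
	 * Example: an IST field of 0b1010 means IST[2:0] = 2 with bit 3 set,
	 * so ist = 2 << 3 = 16 microframes (2 frames).
	 */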
3975
3976 /* Software shall not schedule an Isoch TD with a Frame ID value that
3977 * is less than the Start Frame ID or greater than the End Frame ID,
3978 * where:
3979 *
3980 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3981 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3982 *
3983 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
3986 * boundary, and the Start Frame ID value should be rounded up to the
3987 * nearest Frame boundary.
3988 */
3989 current_frame_id = readl(&xhci->run_regs->microframe_index);
3990 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3991 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3992
3993 start_frame &= 0x7ff;
3994 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3995 end_frame_id = (end_frame_id >> 3) & 0x7ff;
3996
3997 if (start_frame_id < end_frame_id) {
3998 if (start_frame > end_frame_id ||
3999 start_frame < start_frame_id)
4000 ret = -EINVAL;
4001 } else if (start_frame_id > end_frame_id) {
4002 if ((start_frame > end_frame_id &&
4003 start_frame < start_frame_id))
4004 ret = -EINVAL;
4005 } else {
4006 ret = -EINVAL;
4007 }
4008
4009 if (index == 0) {
4010 if (ret == -EINVAL || start_frame == start_frame_id) {
4011 start_frame = start_frame_id + 1;
4012 if (urb->dev->speed == USB_SPEED_LOW ||
4013 urb->dev->speed == USB_SPEED_FULL)
4014 urb->start_frame = start_frame;
4015 else
4016 urb->start_frame = start_frame << 3;
4017 ret = 0;
4018 }
4019 }
4020
4021 if (ret) {
4022 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
4023 start_frame, current_frame_id, index,
4024 start_frame_id, end_frame_id);
4025 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
4026 return ret;
4027 }
4028
4029 return start_frame;
4030}
4031
4032/* Check if we should generate event interrupt for a TD in an isoc URB */
4033static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
4034 struct xhci_interrupter *ir)
4035{
4036 if (xhci->hci_version < 0x100)
4037 return false;
4038 /* always generate an event interrupt for the last TD */
4039 if (i == num_tds - 1)
4040 return false;
	/*
	 * If AVOID_BEI is set the host handles full event rings poorly;
	 * generate an event at least every isoc_bei_interval TDs to keep
	 * the event ring from filling up.
	 */
4045 if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
4046 return !!(i % ir->isoc_bei_interval);
4047
4048 return true;
4049}
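/*
 * Example with the XHCI_AVOID_BEI quirk and isoc_bei_interval = 8: every
 * 8th TD (i = 8, 16, ...) and the last TD of the URB complete with an
 * interrupt; all other TDs have BEI set, so their events are posted to the
 * event ring without raising an interrupt.
 */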
4050
4051/* This is for isoc transfer */
4052static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4053 struct urb *urb, int slot_id, unsigned int ep_index)
4054{
4055 struct xhci_interrupter *ir;
4056 struct xhci_ring *ep_ring;
4057 struct urb_priv *urb_priv;
4058 struct xhci_td *td;
4059 int num_tds, trbs_per_td;
4060 struct xhci_generic_trb *start_trb;
4061 bool first_trb;
4062 int start_cycle;
4063 u32 field, length_field;
4064 int running_total, trb_buff_len, td_len, td_remain_len, ret;
4065 u64 start_addr, addr;
4066 int i, j;
4067 bool more_trbs_coming;
4068 struct xhci_virt_ep *xep;
4069 int frame_id;
4070
4071 xep = &xhci->devs[slot_id]->eps[ep_index];
4072 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
4073 ir = xhci->interrupters[0];
4074
4075 num_tds = urb->number_of_packets;
4076 if (num_tds < 1) {
4077 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
4078 return -EINVAL;
4079 }
4080 start_addr = (u64) urb->transfer_dma;
4081 start_trb = &ep_ring->enqueue->generic;
4082 start_cycle = ep_ring->cycle_state;
4083
4084 urb_priv = urb->hcpriv;
4085 /* Queue the TRBs for each TD, even if they are zero-length */
4086 for (i = 0; i < num_tds; i++) {
4087 unsigned int total_pkt_count, max_pkt;
4088 unsigned int burst_count, last_burst_pkt_count;
4089 u32 sia_frame_id;
4090
4091 first_trb = true;
4092 running_total = 0;
4093 addr = start_addr + urb->iso_frame_desc[i].offset;
4094 td_len = urb->iso_frame_desc[i].length;
4095 td_remain_len = td_len;
4096 max_pkt = usb_endpoint_maxp(&urb->ep->desc);
4097 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
4098
4099 /* A zero-length transfer still involves at least one packet. */
4100 if (total_pkt_count == 0)
4101 total_pkt_count++;
4102 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
4103 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
4104 urb, total_pkt_count);
4105
4106 trbs_per_td = count_isoc_trbs_needed(urb, i);
4107
4108 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
4109 urb->stream_id, trbs_per_td, urb, i, mem_flags);
4110 if (ret < 0) {
4111 if (i == 0)
4112 return ret;
4113 goto cleanup;
4114 }
4115 td = &urb_priv->td[i];
		/* use SIA as the default; overwrite it if a frame ID is used */
4117 sia_frame_id = TRB_SIA;
4118 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
4119 HCC_CFC(xhci->hcc_params)) {
4120 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
4121 if (frame_id >= 0)
4122 sia_frame_id = TRB_FRAME_ID(frame_id);
4123 }
4124 /*
4125 * Set isoc-specific data for the first TRB in a TD.
4126 * Prevent HW from picking up the TRBs by keeping the cycle state
4127 * inverted in the first TD's isoc TRB.
4128 */
4129 field = TRB_TYPE(TRB_ISOC) |
4130 TRB_TLBPC(last_burst_pkt_count) |
4131 sia_frame_id |
4132 (i ? ep_ring->cycle_state : !start_cycle);
4133
4134 /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
4135 if (!xep->use_extended_tbc)
4136 field |= TRB_TBC(burst_count);
4137
4138 /* fill the rest of the TRB fields, and remaining normal TRBs */
4139 for (j = 0; j < trbs_per_td; j++) {
4140 u32 remainder = 0;
4141
4142 /* only the first TRB is isoc; later TRBs are normal TRBs */
4143 if (!first_trb)
4144 field = TRB_TYPE(TRB_NORMAL) |
4145 ep_ring->cycle_state;
4146
4147 /* Only set interrupt on short packet for IN EPs */
4148 if (usb_urb_dir_in(urb))
4149 field |= TRB_ISP;
4150
4151 /* Set the chain bit for all except the last TRB */
4152 if (j < trbs_per_td - 1) {
4153 more_trbs_coming = true;
4154 field |= TRB_CHAIN;
4155 } else {
4156 more_trbs_coming = false;
4157 td->end_trb = ep_ring->enqueue;
4158 td->end_seg = ep_ring->enq_seg;
4159 field |= TRB_IOC;
4160 if (trb_block_event_intr(xhci, num_tds, i, ir))
4161 field |= TRB_BEI;
4162 }
4163 /* Calculate TRB length; a TRB must not cross a 64KB boundary */
4164 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
4165 if (trb_buff_len > td_remain_len)
4166 trb_buff_len = td_remain_len;
4167
4168 /* Set the TRB length, TD size, & interrupter fields. */
4169 remainder = xhci_td_remainder(xhci, running_total,
4170 trb_buff_len, td_len,
4171 urb, more_trbs_coming);
4172
4173 length_field = TRB_LEN(trb_buff_len) |
4174 TRB_INTR_TARGET(0);
4175
4176 /* xhci 1.1 with ETE uses TD Size field for TBC */
4177 if (first_trb && xep->use_extended_tbc)
4178 length_field |= TRB_TD_SIZE_TBC(burst_count);
4179 else
4180 length_field |= TRB_TD_SIZE(remainder);
4181 first_trb = false;
4182
4183 queue_trb(xhci, ep_ring, more_trbs_coming,
4184 lower_32_bits(addr),
4185 upper_32_bits(addr),
4186 length_field,
4187 field);
4188 running_total += trb_buff_len;
4189
4190 addr += trb_buff_len;
4191 td_remain_len -= trb_buff_len;
4192 }
4193
4194 /* Check TD length */
4195 if (running_total != td_len) {
4196 xhci_err(xhci, "ISOC TD length mismatch\n");
4197 ret = -EINVAL;
4198 goto cleanup;
4199 }
4200 }
4201
4202 /* store the next frame id */
4203 if (HCC_CFC(xhci->hcc_params))
4204 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
4205
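	/*
	 * AMD PLL quirk: disable the chipset's PLL power-saving feature while
	 * isochronous transfers are in flight; this is the first isoc URB.
	 */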
4206 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
4207 if (xhci->quirks & XHCI_AMD_PLL_FIX)
4208 usb_amd_quirk_pll_disable();
4209 }
4210 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
4211
4212 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
4213 start_cycle, start_trb);
4214 return 0;
4215cleanup:
4216 /* Clean up a partially enqueued isoc transfer. */
4217
4218 for (i--; i >= 0; i--)
4219 list_del_init(&urb_priv->td[i].td_list);
4220
4221 /* Use the first TD as a temporary variable to turn the TDs we've queued
4222 * into No-ops with a software-owned cycle bit. That way the hardware
4223 * won't accidentally start executing bogus TDs when we partially
4224 * overwrite them. td->start_trb and td->start_seg are already set.
4225 */
4226 urb_priv->td[0].end_trb = ep_ring->enqueue;
4227 /* Every TRB except the first & last will have its cycle bit flipped. */
4228 td_to_noop(&urb_priv->td[0], true);
4229
4230 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
4231 ep_ring->enqueue = urb_priv->td[0].start_trb;
4232 ep_ring->enq_seg = urb_priv->td[0].start_seg;
4233 ep_ring->cycle_state = start_cycle;
4234 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
4235 return ret;
4236}
4237
4238/*
4239 * Check the transfer ring to guarantee there is enough room for the URB,
4240 * and update the ISO URB's start_frame and interval.
4241 * Update the interval as xhci_queue_intr_tx does. Use the xHC frame_index
4242 * to update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
4243 * Contiguous Frame ID is not supported by the HC.
4244 */
4245int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
4246 struct urb *urb, int slot_id, unsigned int ep_index)
4247{
4248 struct xhci_virt_device *xdev;
4249 struct xhci_ring *ep_ring;
4250 struct xhci_ep_ctx *ep_ctx;
4251 int start_frame;
4252 int num_tds, num_trbs, i;
4253 int ret;
4254 struct xhci_virt_ep *xep;
4255 int ist;
4256
4257 xdev = xhci->devs[slot_id];
4258 xep = &xhci->devs[slot_id]->eps[ep_index];
4259 ep_ring = xdev->eps[ep_index].ring;
4260 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
4261
4262 num_trbs = 0;
4263 num_tds = urb->number_of_packets;
4264 for (i = 0; i < num_tds; i++)
4265 num_trbs += count_isoc_trbs_needed(urb, i);
4266
4267 /* Check the ring to guarantee there is enough room for the whole URB.
4268 * Do not insert any TD of the URB into the ring if the check fails.
4269 */
4270 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
4271 num_trbs, mem_flags);
4272 if (ret)
4273 return ret;
4274
4275 /*
4276 * Check interval value. This should be done before we start to
4277 * calculate the start frame value.
4278 */
4279 check_interval(urb, ep_ctx);
4280
4281 /* Calculate the start frame and put it in urb->start_frame. */
4282 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
4283 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
4284 urb->start_frame = xep->next_frame_id;
4285 goto skip_start_over;
4286 }
4287 }
4288
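	/* MFINDEX is a 14-bit microframe counter; mask off the reserved bits */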
4289 start_frame = readl(&xhci->run_regs->microframe_index);
4290 start_frame &= 0x3fff;
4291 /*
4292 * Round up to the next frame and account for the time before the TRB
4293 * actually gets scheduled by the hardware.
4294 */
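	/*
	 * HCS_IST() bits 0:2 hold the Isochronous Scheduling Threshold; if
	 * bit 3 is set the value is in frames rather than microframes, so
	 * scale it up by 8.
	 */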
4295 ist = HCS_IST(xhci->hcs_params2) & 0x7;
4296 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
4297 ist <<= 3;
4298 start_frame += ist + XHCI_CFC_DELAY;
4299 start_frame = roundup(start_frame, 8);
4300
4301 /*
4302 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
4303 * is greater than 8 microframes.
4304 */
4305 if (urb->dev->speed == USB_SPEED_LOW ||
4306 urb->dev->speed == USB_SPEED_FULL) {
4307 start_frame = roundup(start_frame, urb->interval << 3);
4308 urb->start_frame = start_frame >> 3;
4309 } else {
4310 start_frame = roundup(start_frame, urb->interval);
4311 urb->start_frame = start_frame;
4312 }
4313
4314skip_start_over:
4315
4316 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
4317}
4318
4319/**** Command Ring Operations ****/
4320
4321/* Generic function for queueing a command TRB on the command ring.
4322 * Check to make sure there's room on the command ring for one command TRB.
4323 * Also check that there's room reserved for commands that must not fail.
4324 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
4325 * then only check for the number of reserved spots.
4326 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4327 * because the command event handler may want to resubmit a failed command.
4328 */
4329static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4330 u32 field1, u32 field2,
4331 u32 field3, u32 field4, bool command_must_succeed)
4332{
4333 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4334 int ret;
4335
4336 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4337 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4338 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4339 return -ESHUTDOWN;
4340 }
4341
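	/*
	 * A command that is allowed to fail must leave the reserved slots
	 * untouched, so it needs one more free TRB than a must-succeed
	 * command, which may consume one of the reserved entries.
	 */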
4342 if (!command_must_succeed)
4343 reserved_trbs++;
4344
4345 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4346 reserved_trbs, GFP_ATOMIC);
4347 if (ret < 0) {
4348 xhci_err(xhci, "ERR: No room for command on command ring\n");
4349 if (command_must_succeed)
4350 xhci_err(xhci,
4351 "ERR: Reserved TRB counting for unfailable commands failed.\n");
4352 return ret;
4353 }
4354
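	/*
	 * Remember which TRB this command occupies so completion events can
	 * be matched back to it.
	 */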
4355 cmd->command_trb = xhci->cmd_ring->enqueue;
4356
4357 /* if there are no other commands queued we start the timeout timer */
4358 if (list_empty(&xhci->cmd_list)) {
4359 xhci->current_cmd = cmd;
4360 xhci_mod_cmd_timer(xhci);
4361 }
4362
4363 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4364
4365 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4366 field4 | xhci->cmd_ring->cycle_state);
4367 return 0;
4368}
4369
4370/* Queue a slot enable or disable request on the command ring */
4371int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4372 u32 trb_type, u32 slot_id)
4373{
4374 return queue_command(xhci, cmd, 0, 0, 0,
4375 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4376}
4377
4378/* Queue an address device command TRB */
4379int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4380 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4381{
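	/*
	 * SETUP_CONTEXT_ONLY sets the Block Set Address Request (BSR) flag so
	 * the xHC initializes the slot context without sending SET_ADDRESS to
	 * the device.
	 */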
4382 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4383 upper_32_bits(in_ctx_ptr), 0,
4384 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4385 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4386}
4387
4388int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4389 u32 field1, u32 field2, u32 field3, u32 field4)
4390{
4391 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4392}
4393
4394/* Queue a reset device command TRB */
4395int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4396 u32 slot_id)
4397{
4398 return queue_command(xhci, cmd, 0, 0, 0,
4399 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4400 false);
4401}
4402
4403/* Queue a configure endpoint command TRB */
4404int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4405 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4406 u32 slot_id, bool command_must_succeed)
4407{
4408 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4409 upper_32_bits(in_ctx_ptr), 0,
4410 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4411 command_must_succeed);
4412}
4413
4414/* Queue an evaluate context command TRB */
4415int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4416 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4417{
4418 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4419 upper_32_bits(in_ctx_ptr), 0,
4420 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4421 command_must_succeed);
4422}
4423
4424/*
4425 * 'suspend' is set to indicate that this Stop Endpoint Command is being
4426 * issued to quiesce activity on an endpoint that is about to be suspended.
4427 */
4428int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4429 int slot_id, unsigned int ep_index, int suspend)
4430{
4431 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4432 u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
4433 u32 type = TRB_TYPE(TRB_STOP_RING);
4434 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4435
4436 return queue_command(xhci, cmd, 0, 0, 0,
4437 trb_slot_id | trb_ep_index | type | trb_suspend, false);
4438}
4439
4440int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4441 int slot_id, unsigned int ep_index,
4442 enum xhci_ep_reset_type reset_type)
4443{
4444 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4445 u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
4446 u32 type = TRB_TYPE(TRB_RESET_EP);
4447
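	/*
	 * A soft reset sets the Transfer State Preserve (TSP) flag so the
	 * endpoint's data toggle / sequence number state survives the reset.
	 */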
4448 if (reset_type == EP_SOFT_RESET)
4449 type |= TRB_TSP;
4450
4451 return queue_command(xhci, cmd, 0, 0, 0,
4452 trb_slot_id | trb_ep_index | type, false);
4453}