/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer.  If SW is the producer, it rings the doorbell for
 *    command and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you.  If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer.
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */
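
/*
 * To make the producer rules above concrete, here is a minimal sketch (not
 * part of this driver) of enqueueing a single TRB whose four generic fields
 * are given as CPU-endian u32s in a hypothetical trb_fields[] array;
 * slot_id, ep_index, stream_id and more_trbs_coming are assumed to be
 * supplied by the caller:
 *
 *	if (room_on_ring(xhci, ring, 1)) {
 *		union xhci_trb *trb = ring->enqueue;
 *
 *		trb->generic.field[0] = cpu_to_le32(trb_fields[0]);
 *		trb->generic.field[1] = cpu_to_le32(trb_fields[1]);
 *		trb->generic.field[2] = cpu_to_le32(trb_fields[2]);
 *		trb->generic.field[3] = cpu_to_le32(trb_fields[3] |
 *				ring->cycle_state);
 *		inc_enq(xhci, ring, false, more_trbs_coming);
 *		xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 *	}
 */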

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs; a valid offset is 0 .. TRBS_PER_SEGMENT - 1 */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
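
/*
 * Illustrative sketch (not part of this driver) of how the consumer rules
 * and inc_deq() combine when software drains the event ring; the per-event
 * dispatch is elided:
 *
 *	union xhci_trb *event = xhci->event_ring->dequeue;
 *
 *	while ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) ==
 *			xhci->event_ring->cycle_state) {
 *		... dispatch the event ...
 *		inc_deq(xhci, xhci->event_ring, true);
 *		event = xhci->event_ring->dequeue;
 *	}
 */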

/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet.  We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;

				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &=
						cpu_to_le32(~TRB_CHAIN);
					next->link.control |=
						cpu_to_le32(chain);
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= cpu_to_le32(TRB_CYCLE);
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}
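
/*
 * A hypothetical caller queueing a TD built from num_trbs TRBs would pass
 * more_trbs_coming = true for every TRB but the last, so that intervening
 * link TRBs inherit the chain bit and are handed to the hardware mid-TD
 * (sketch only, TRB contents elided):
 *
 *	for (i = 0; i < num_trbs; i++) {
 *		... fill in ring->enqueue ...
 *		inc_enq(xhci, ring, false, i != num_trbs - 1);
 *	}
 */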

/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
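
/*
 * The FIXME above points at the simpler bookkeeping used by later versions
 * of this driver: keep a per-ring count of free TRBs and adjust it on every
 * enqueue and dequeue.  A sketch, assuming a num_trbs_free field were added
 * to struct xhci_ring:
 *
 *	static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 *			unsigned int num_trbs)
 *	{
 *		return ring->num_trbs_free >= num_trbs;
 *	}
 */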

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
			(ep_state & EP_HALTED))
		return;
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					stream_id);
	}
}

/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
			xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
			(trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
	 * ended just before the link TRB on a one-segment ring, or if the TD
	 * wrapped around the top of the ring, because it doesn't have the TD in
	 * question.  Look for the one-segment case where the stalled TRB's
	 * address is greater than the new dequeue pointer address.
	 */
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
		state->new_cycle_state ^= 0x1;
	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
}
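
/*
 * For reference, handle_stopped_endpoint() below drives this in three steps
 * (sketch of the calling pattern, arguments abbreviated):
 *
 *	struct xhci_dequeue_state deq_state;
 *
 *	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id,
 *			cur_td, &deq_state);
 *	xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, stream_id,
 *			&deq_state);
 *	xhci_ring_cmd_db(xhci);
 */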

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
					TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only give back the URB when this is the last TD in the URB */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
					event);
		else
			xhci_warn(xhci, "Stop endpoint command "
					"completion for disabled slot %u\n",
					slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}

/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del_init(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock(&xhci->lock);
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(le32_to_cpu(event->status)));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				le64_to_cpu(ep_ctx->deq));
		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
				dev->eps[ep_index].queued_deq_ptr) ==
				(le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
			ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
					"Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					dev->eps[ep_index].queued_deq_seg,
					dev->eps[ep_index].queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			GET_COMP_CODE(le32_to_cpu(event->status)));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}
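
/*
 * From the submitter's side, the wait-list protocol is roughly (sketch only,
 * locking and error handling omitted; in_ctx_dma stands for the input
 * context's DMA address): record the command ring enqueue position in the
 * command, add it to the device's cmd_list, queue the command TRB, ring the
 * doorbell, and sleep on the completion:
 *
 *	command->command_trb = xhci->cmd_ring->enqueue;
 *	list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 *	xhci_queue_configure_endpoint(xhci, in_ctx_dma, slot_id, false);
 *	xhci_ring_cmd_db(xhci);
 *	wait_for_completion(command->completion);
 */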

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
			& TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id]) {
			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
				/* Delete default control endpoint resources */
				xhci_free_device_endpoint_resources(xhci,
						xhci->devs[slot_id], true);
			xhci_free_virt_device(xhci, slot_id);
		}
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
				le32_to_cpu(ctrl_ctx->drop_flags)) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
				NEC_FW_MAJOR(le32_to_cpu(event->status)),
				NEC_FW_MINOR(le32_to_cpu(event->status)));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
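
/*
 * Worked example (hypothetical topology): if the Extended Capabilities mark
 * hardware ports 1 and 2 as USB 2.0 and ports 3 and 4 as USB 3.0, then for
 * port_id = 4 exactly one similar-speed port (port 3) precedes it, so this
 * returns 1: index 1 into the USB 3.0 roothub's port and bus state arrays.
 */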

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = xhci_readl(xhci, port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "resume SS port %d\n", port_id);
			temp = xhci_port_state_to_neutral(temp);
			temp &= ~PORT_PLS_MASK;
			temp |= PORT_LINK_STROBE | XDEV_U0;
			xhci_writel(xhci, temp, port_array[faked_port_index]);
			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					faked_port_index);
			if (!slot_id) {
				xhci_dbg(xhci, "slot_id is zero\n");
				goto cleanup;
			}
			xhci_ring_device(xhci, slot_id);
			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
			/* Clear PORT_PLC */
			temp = xhci_readl(xhci, port_array[faked_port_index]);
			temp = xhci_port_state_to_neutral(temp);
			temp |= PORT_PLC;
			xhci_writel(xhci, temp, port_array[faked_port_index]);
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			mod_timer(&hcd->rh_timer,
					bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
						suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
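
/*
 * Typical use (sketch): the transfer event handler takes the DMA address
 * reported in a transfer event and asks whether it falls within the TD at
 * the head of the endpoint ring:
 *
 *	event_dma = le64_to_cpu(event->buffer);
 *	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
 *			td->last_trb, event_dma);
 *	if (!event_seg)
 *		... the event does not match this TD ...
 */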

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
				cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing and remove the TD from its td list.
 * Return 1 if the URB can be given back.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
		union xhci_trb *event_trb, struct xhci_transfer_event *event,
		struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring, false);
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del_init(&td->cancelled_td_list);

		urb_priv->td_cnt++;
		/* Give back the URB when all its TDs are completed */
		if (urb_priv->td_cnt == urb_priv->length) {
			ret = 1;
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
						== 0) {
					if (xhci->quirks & XHCI_AMD_PLL_FIX)
						usb_amd_quirk_pll_enable();
				}
			}
		}
	}

	return ret;
}
1605
1606/*
1607 * Process control TDs: update the URB's status and actual_length.
1608 */
1609static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1610 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1611 struct xhci_virt_ep *ep, int *status)
1612{
1613 struct xhci_virt_device *xdev;
1614 struct xhci_ring *ep_ring;
1615 unsigned int slot_id;
1616 int ep_index;
1617 struct xhci_ep_ctx *ep_ctx;
1618 u32 trb_comp_code;
1619
1620 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1621 xdev = xhci->devs[slot_id];
1622 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1623 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1624 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1625 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1626
1627 xhci_debug_trb(xhci, xhci->event_ring->dequeue);
1628 switch (trb_comp_code) {
1629 case COMP_SUCCESS:
1630 if (event_trb == ep_ring->dequeue) {
1631 xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1632 "without IOC set??\n");
1633 *status = -ESHUTDOWN;
1634 } else if (event_trb != td->last_trb) {
1635 xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1636 "without IOC set??\n");
1637 *status = -ESHUTDOWN;
1638 } else {
1639 *status = 0;
1640 }
1641 break;
1642 case COMP_SHORT_TX:
1643 xhci_warn(xhci, "WARN: short transfer on control ep\n");
1644 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1645 *status = -EREMOTEIO;
1646 else
1647 *status = 0;
1648 break;
1649 case COMP_STOP_INVAL:
1650 case COMP_STOP:
1651 return finish_td(xhci, td, event_trb, event, ep, status, false);
1652 default:
1653 if (!xhci_requires_manual_halt_cleanup(xhci,
1654 ep_ctx, trb_comp_code))
1655 break;
1656 xhci_dbg(xhci, "TRB error code %u, "
1657 "halted endpoint index = %u\n",
1658 trb_comp_code, ep_index);
1659 /* else fall through */
1660 case COMP_STALL:
1661 /* Did we transfer part of the data (middle) phase? */
1662 if (event_trb != ep_ring->dequeue &&
1663 event_trb != td->last_trb)
1664 td->urb->actual_length =
1665 td->urb->transfer_buffer_length
1666 - TRB_LEN(le32_to_cpu(event->transfer_len));
1667 else
1668 td->urb->actual_length = 0;
1669
1670 xhci_cleanup_halted_endpoint(xhci,
1671 slot_id, ep_index, 0, td, event_trb);
1672 return finish_td(xhci, td, event_trb, event, ep, status, true);
1673 }
1674 /*
1675 * Did we transfer any data, despite the errors that might have
1676 * happened? I.e. did we get past the setup stage?
1677 */
1678 if (event_trb != ep_ring->dequeue) {
1679 /* The event was for the status stage */
1680 if (event_trb == td->last_trb) {
1681 if (td->urb->actual_length != 0) {
1682 /* Don't overwrite a previously set error code
1683 */
1684 if ((*status == -EINPROGRESS || *status == 0) &&
1685 (td->urb->transfer_flags
1686 & URB_SHORT_NOT_OK))
1687 /* Did we already see a short data
1688 * stage? */
1689 *status = -EREMOTEIO;
1690 } else {
1691 td->urb->actual_length =
1692 td->urb->transfer_buffer_length;
1693 }
1694 } else {
1695 /* Maybe the event was for the data stage? */
1696 td->urb->actual_length =
1697 td->urb->transfer_buffer_length -
1698 TRB_LEN(le32_to_cpu(event->transfer_len));
1699 xhci_dbg(xhci, "Waiting for status "
1700 "stage event\n");
1701 return 0;
1702 }
1703 }
1704
1705 return finish_td(xhci, td, event_trb, event, ep, status, false);
1706}
1707
1708/*
1709 * Process isochronous TDs: update the URB's packet status and actual_length.
1710 */
1711static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1712 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1713 struct xhci_virt_ep *ep, int *status)
1714{
1715 struct xhci_ring *ep_ring;
1716 struct urb_priv *urb_priv;
1717 int idx;
1718 int len = 0;
1719 union xhci_trb *cur_trb;
1720 struct xhci_segment *cur_seg;
1721 struct usb_iso_packet_descriptor *frame;
1722 u32 trb_comp_code;
1723 bool skip_td = false;
1724
1725 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1726 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1727 urb_priv = td->urb->hcpriv;
1728 idx = urb_priv->td_cnt;
1729 frame = &td->urb->iso_frame_desc[idx];
1730
1731 /* handle completion code */
1732 switch (trb_comp_code) {
1733 case COMP_SUCCESS:
1734 frame->status = 0;
1735 break;
1736 case COMP_SHORT_TX:
1737 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
1738 -EREMOTEIO : 0;
1739 break;
1740 case COMP_BW_OVER:
1741 frame->status = -ECOMM;
1742 skip_td = true;
1743 break;
1744 case COMP_BUFF_OVER:
1745 case COMP_BABBLE:
1746 frame->status = -EOVERFLOW;
1747 skip_td = true;
1748 break;
1749 case COMP_DEV_ERR:
1750 case COMP_STALL:
1751 frame->status = -EPROTO;
1752 skip_td = true;
1753 break;
1754 case COMP_STOP:
1755 case COMP_STOP_INVAL:
1756 break;
1757 default:
1758 frame->status = -1;
1759 break;
1760 }
1761
1762 if (trb_comp_code == COMP_SUCCESS || skip_td) {
1763 frame->actual_length = frame->length;
1764 td->urb->actual_length += frame->length;
1765 } else {
1766 for (cur_trb = ep_ring->dequeue,
1767 cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
1768 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1769 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
1770 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
1771 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1772 }
1773 len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1774 TRB_LEN(le32_to_cpu(event->transfer_len));
1775
1776 if (trb_comp_code != COMP_STOP_INVAL) {
1777 frame->actual_length = len;
1778 td->urb->actual_length += len;
1779 }
1780 }
1781
1782 return finish_td(xhci, td, event_trb, event, ep, status, false);
1783}
1784
1785static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1786 struct xhci_transfer_event *event,
1787 struct xhci_virt_ep *ep, int *status)
1788{
1789 struct xhci_ring *ep_ring;
1790 struct urb_priv *urb_priv;
1791 struct usb_iso_packet_descriptor *frame;
1792 int idx;
1793
1794 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1795 urb_priv = td->urb->hcpriv;
1796 idx = urb_priv->td_cnt;
1797 frame = &td->urb->iso_frame_desc[idx];
1798
1799 /* The transfer is partly done. */
1800 frame->status = -EXDEV;
1801
1802 /* calc actual length */
1803 frame->actual_length = 0;
1804
1805 /* Update ring dequeue pointer */
1806 while (ep_ring->dequeue != td->last_trb)
1807 inc_deq(xhci, ep_ring, false);
1808 inc_deq(xhci, ep_ring, false);
1809
1810 return finish_td(xhci, td, NULL, event, ep, status, true);
1811}
1812
1813/*
1814 * Process bulk and interrupt TDs: update the URB's status and actual_length.
1815 */
1816static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
1817 union xhci_trb *event_trb, struct xhci_transfer_event *event,
1818 struct xhci_virt_ep *ep, int *status)
1819{
1820 struct xhci_ring *ep_ring;
1821 union xhci_trb *cur_trb;
1822 struct xhci_segment *cur_seg;
1823 u32 trb_comp_code;
1824
1825 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1826 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1827
1828 switch (trb_comp_code) {
1829 case COMP_SUCCESS:
1830 /* Double check that the HW transferred everything. */
1831 if (event_trb != td->last_trb) {
1832 xhci_warn(xhci, "WARN Successful completion "
1833 "on short TX\n");
1834 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1835 *status = -EREMOTEIO;
1836 else
1837 *status = 0;
1838 } else {
1839 *status = 0;
1840 }
1841 break;
1842 case COMP_SHORT_TX:
1843 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1844 *status = -EREMOTEIO;
1845 else
1846 *status = 0;
1847 break;
1848 default:
1849 /* Others already handled above */
1850 break;
1851 }
1852 if (trb_comp_code == COMP_SHORT_TX)
1853 xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
1854 "%d bytes untransferred\n",
1855 td->urb->ep->desc.bEndpointAddress,
1856 td->urb->transfer_buffer_length,
1857 TRB_LEN(le32_to_cpu(event->transfer_len)));
1858 /* Fast path - was this the last TRB in the TD for this URB? */
1859 if (event_trb == td->last_trb) {
1860 if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
1861 td->urb->actual_length =
1862 td->urb->transfer_buffer_length -
1863 TRB_LEN(le32_to_cpu(event->transfer_len));
1864 if (td->urb->transfer_buffer_length <
1865 td->urb->actual_length) {
1866 xhci_warn(xhci, "HC gave bad length "
1867 "of %d bytes left\n",
1868 TRB_LEN(le32_to_cpu(event->transfer_len)));
1869 td->urb->actual_length = 0;
1870 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1871 *status = -EREMOTEIO;
1872 else
1873 *status = 0;
1874 }
1875 /* Don't overwrite a previously set error code */
1876 if (*status == -EINPROGRESS) {
1877 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1878 *status = -EREMOTEIO;
1879 else
1880 *status = 0;
1881 }
1882 } else {
1883 td->urb->actual_length =
1884 td->urb->transfer_buffer_length;
1885 /* Ignore a short packet completion if the
1886 * untransferred length was zero.
1887 */
1888 if (*status == -EREMOTEIO)
1889 *status = 0;
1890 }
1891 } else {
1892 /* Slow path - walk the list, starting from the dequeue
1893 * pointer, to get the actual length transferred.
1894 */
1895 td->urb->actual_length = 0;
1896 for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
1897 cur_trb != event_trb;
1898 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
1899 if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
1900 !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
1901 td->urb->actual_length +=
1902 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
1903 }
1904 /* If the ring didn't stop on a Link or No-op TRB, add
1905 * in the actual bytes transferred from the Normal TRB
1906 */
1907 if (trb_comp_code != COMP_STOP_INVAL)
1908 td->urb->actual_length +=
1909 TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
1910 TRB_LEN(le32_to_cpu(event->transfer_len));
1911 }
1912
1913 return finish_td(xhci, td, event_trb, event, ep, status, false);
1914}
1915
1916/*
1917 * If this function returns an error condition, it means it got a Transfer
1918 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
1919 * At this point, the host controller is probably hosed and should be reset.
1920 */
1921static int handle_tx_event(struct xhci_hcd *xhci,
1922 struct xhci_transfer_event *event)
1923{
1924 struct xhci_virt_device *xdev;
1925 struct xhci_virt_ep *ep;
1926 struct xhci_ring *ep_ring;
1927 unsigned int slot_id;
1928 int ep_index;
1929 struct xhci_td *td = NULL;
1930 dma_addr_t event_dma;
1931 struct xhci_segment *event_seg;
1932 union xhci_trb *event_trb;
1933 struct urb *urb = NULL;
1934 int status = -EINPROGRESS;
1935 struct urb_priv *urb_priv;
1936 struct xhci_ep_ctx *ep_ctx;
1937 struct list_head *tmp;
1938 u32 trb_comp_code;
1939 int ret = 0;
1940 int td_num = 0;
1941
1942 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1943 xdev = xhci->devs[slot_id];
1944 if (!xdev) {
1945 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
1946 return -ENODEV;
1947 }
1948
1949 /* Endpoint ID is 1 based, our index is zero based */
1950 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1951 ep = &xdev->eps[ep_index];
1952 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1953 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1954 if (!ep_ring ||
1955 (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
1956 EP_STATE_DISABLED) {
1957 xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
1958 "or incorrect stream ring\n");
1959 return -ENODEV;
1960 }
1961
1962 /* Count current td numbers if ep->skip is set */
1963 if (ep->skip) {
1964 list_for_each(tmp, &ep_ring->td_list)
1965 td_num++;
1966 }
1967
1968 event_dma = le64_to_cpu(event->buffer);
1969 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1970 /* Look for common error cases */
1971 switch (trb_comp_code) {
1972 /* Skip codes that require special handling depending on
1973 * transfer type
1974 */
1975 case COMP_SUCCESS:
1976 case COMP_SHORT_TX:
1977 break;
1978 case COMP_STOP:
1979 xhci_dbg(xhci, "Stopped on Transfer TRB\n");
1980 break;
1981 case COMP_STOP_INVAL:
1982 xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
1983 break;
1984 case COMP_STALL:
1985 xhci_warn(xhci, "WARN: Stalled endpoint\n");
1986 ep->ep_state |= EP_HALTED;
1987 status = -EPIPE;
1988 break;
1989 case COMP_TRB_ERR:
1990 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
1991 status = -EILSEQ;
1992 break;
1993 case COMP_SPLIT_ERR:
1994 case COMP_TX_ERR:
1995 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
1996 status = -EPROTO;
1997 break;
1998 case COMP_BABBLE:
1999 xhci_warn(xhci, "WARN: babble error on endpoint\n");
2000 status = -EOVERFLOW;
2001 break;
2002 case COMP_DB_ERR:
2003 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2004 status = -ENOSR;
2005 break;
2006 case COMP_BW_OVER:
2007 xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2008 break;
2009 case COMP_BUFF_OVER:
2010 xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2011 break;
2012 case COMP_UNDERRUN:
2013 /*
2014 * When the Isoch ring is empty, the xHC will generate
2015 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
2016 * Underrun Event for an OUT Isoch endpoint.
2017 */
2018 xhci_dbg(xhci, "underrun event on endpoint\n");
2019 if (!list_empty(&ep_ring->td_list))
2020 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2021 "still with TDs queued?\n",
2022 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2023 ep_index);
2024 goto cleanup;
2025 case COMP_OVERRUN:
2026 xhci_dbg(xhci, "overrun event on endpoint\n");
2027 if (!list_empty(&ep_ring->td_list))
2028 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2029 "still with TDs queued?\n",
2030 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2031 ep_index);
2032 goto cleanup;
2033 case COMP_DEV_ERR:
2034 xhci_warn(xhci, "WARN: detect an incompatible device");
2035 status = -EPROTO;
2036 break;
2037 case COMP_MISSED_INT:
2038 /*
2039 * When a Missed Service Error is encountered, the xHC may have
2040 * skipped one or more isoc TDs.
2041 * Set the endpoint's skip flag, and complete the missed TDs as
2042 * short transfers the next time this endpoint ring is processed.
2043 */
2044 ep->skip = true;
2045 xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2046 goto cleanup;
2047 default:
2048 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2049 status = 0;
2050 break;
2051 }
2052 xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
2053 "busted\n");
2054 goto cleanup;
2055 }
2056
2057 do {
2058 /* This TRB should be in the TD at the head of this ring's
2059 * TD list.
2060 */
2061 if (list_empty(&ep_ring->td_list)) {
2062 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
2063 "with no TDs queued?\n",
2064 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2065 ep_index);
2066 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2067 (le32_to_cpu(event->flags) &
2068 TRB_TYPE_BITMASK)>>10);
2069 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2070 if (ep->skip) {
2071 ep->skip = false;
2072 xhci_dbg(xhci, "td_list is empty while skip "
2073 "flag set. Clear skip flag.\n");
2074 }
2075 ret = 0;
2076 goto cleanup;
2077 }
2078
2079 /* We've skipped all the TDs on the ep ring when ep->skip is set */
2080 if (ep->skip && td_num == 0) {
2081 ep->skip = false;
2082 xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2083 "Clear skip flag.\n");
2084 ret = 0;
2085 goto cleanup;
2086 }
2087
2088 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2089 if (ep->skip)
2090 td_num--;
2091
2092 /* Is this a TRB in the currently executing TD? */
2093 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2094 td->last_trb, event_dma);
2095
2096 /*
2097 * Skip the Force Stopped Event. The event_trb (event_dma) of the
2098 * FSE is not in the TD pointed to by ep_ring->dequeue because the
2099 * hardware dequeue pointer is still at the previous TRB of the
2100 * current TD. That previous TRB may be a Link TRB or the last
2101 * TRB of the previous TD. The command completion handler will
2102 * take care of the rest.
2103 */
2104 if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
2105 ret = 0;
2106 goto cleanup;
2107 }
2108
2109 if (!event_seg) {
2110 if (!ep->skip ||
2111 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2112 /* Some host controllers give a spurious
2113 * successful event after a short transfer.
2114 * Ignore it.
2115 */
2116 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2117 ep_ring->last_td_was_short) {
2118 ep_ring->last_td_was_short = false;
2119 ret = 0;
2120 goto cleanup;
2121 }
2122 /* HC is busted, give up! */
2123 xhci_err(xhci,
2124 "ERROR Transfer event TRB DMA ptr not "
2125 "part of current TD\n");
2126 return -ESHUTDOWN;
2127 }
2128
2129 ret = skip_isoc_td(xhci, td, event, ep, &status);
2130 goto cleanup;
2131 }
2132 if (trb_comp_code == COMP_SHORT_TX)
2133 ep_ring->last_td_was_short = true;
2134 else
2135 ep_ring->last_td_was_short = false;
2136
2137 if (ep->skip) {
2138 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2139 ep->skip = false;
2140 }
2141
2142 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2143 sizeof(*event_trb)];
2144 /*
2145 * No-op TRB should not trigger interrupts.
2146 * If event_trb is a no-op TRB, it means the
2147 * corresponding TD has been cancelled. Just ignore
2148 * the TD.
2149 */
2150 if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2151 xhci_dbg(xhci,
2152 "event_trb is a no-op TRB. Skip it\n");
2153 goto cleanup;
2154 }
2155
2156 /* Now update the urb's actual_length and give back to
2157 * the core
2158 */
2159 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2160 ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2161 &status);
2162 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2163 ret = process_isoc_td(xhci, td, event_trb, event, ep,
2164 &status);
2165 else
2166 ret = process_bulk_intr_td(xhci, td, event_trb, event,
2167 ep, &status);
2168
2169cleanup:
2170 /*
2171 * Do not update event ring dequeue pointer if ep->skip is set.
2172 * We will roll back later to continue processing the missed TDs.
2173 */
2174 if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2175 inc_deq(xhci, xhci->event_ring, true);
2176 }
2177
2178 if (ret) {
2179 urb = td->urb;
2180 urb_priv = urb->hcpriv;
2181 /* Leave the TD around for the reset endpoint function
2182 * to use (but only if it's not a control endpoint,
2183 * since we already queued the Set TR dequeue pointer
2184 * command for stalled control endpoints).
2185 */
2186 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2187 (trb_comp_code != COMP_STALL &&
2188 trb_comp_code != COMP_BABBLE))
2189 xhci_urb_free_priv(xhci, urb_priv);
2190
2191 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2192 if ((urb->actual_length != urb->transfer_buffer_length &&
2193 (urb->transfer_flags &
2194 URB_SHORT_NOT_OK)) ||
2195 status != 0)
2196 xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2197 "expected = %x, status = %d\n",
2198 urb, urb->actual_length,
2199 urb->transfer_buffer_length,
2200 status);
2201 spin_unlock(&xhci->lock);
2202 /* EHCI, UHCI, and OHCI always unconditionally set the
2203 * urb->status of an isochronous endpoint to 0.
2204 */
2205 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2206 status = 0;
2207 usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2208 spin_lock(&xhci->lock);
2209 }
2210
2211 /*
2212 * If ep->skip is set, it means there are missed TDs on the
2213 * endpoint ring that still need to be taken care of.
2214 * Process them as short transfers until we reach the TD pointed
2215 * to by the event.
2216 */
2217 } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2218
2219 return 0;
2220}
2221
2222/*
2223 * This function handles all OS-owned events on the event ring. It may drop
2224 * xhci->lock between event processing (e.g. to pass up port status changes).
2225 * Returns >0 for "possibly more events to process" (caller should call again),
2226 * otherwise 0 if done. In future, <0 returns should indicate error code.
2227 */
2228static int xhci_handle_event(struct xhci_hcd *xhci)
2229{
2230 union xhci_trb *event;
2231 int update_ptrs = 1;
2232 int ret;
2233
2234 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2235 xhci->error_bitmask |= 1 << 1;
2236 return 0;
2237 }
2238
2239 event = xhci->event_ring->dequeue;
2240 /* Does the HC or OS own the TRB? */
2241 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2242 xhci->event_ring->cycle_state) {
2243 xhci->error_bitmask |= 1 << 2;
2244 return 0;
2245 }
2246
2247 /*
2248 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2249 * speculative reads of the event's flags/data below.
2250 */
2251 rmb();
2252 /* FIXME: Handle more event types. */
2253 switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2254 case TRB_TYPE(TRB_COMPLETION):
2255 handle_cmd_completion(xhci, &event->event_cmd);
2256 break;
2257 case TRB_TYPE(TRB_PORT_STATUS):
2258 handle_port_status(xhci, event);
2259 update_ptrs = 0;
2260 break;
2261 case TRB_TYPE(TRB_TRANSFER):
2262 ret = handle_tx_event(xhci, &event->trans_event);
2263 if (ret < 0)
2264 xhci->error_bitmask |= 1 << 9;
2265 else
2266 update_ptrs = 0;
2267 break;
2268 default:
2269 if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2270 TRB_TYPE(48))
2271 handle_vendor_event(xhci, event);
2272 else
2273 xhci->error_bitmask |= 1 << 3;
2274 }
2275 /* Any of the above functions may drop and re-acquire the lock, so check
2276 * to make sure a watchdog timer didn't mark the host as non-responsive.
2277 */
2278 if (xhci->xhc_state & XHCI_STATE_DYING) {
2279 xhci_dbg(xhci, "xHCI host dying, returning from "
2280 "event handler.\n");
2281 return 0;
2282 }
2283
2284 if (update_ptrs)
2285 /* Update SW event ring dequeue pointer */
2286 inc_deq(xhci, xhci->event_ring, true);
2287
2288 /* Are there more items on the event ring? Caller will call us again to
2289 * check.
2290 */
2291 return 1;
2292}
2293
2294/*
2295 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2296 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2297 * indicators of an event TRB error, but we check the status *first* to be safe.
2298 */
2299irqreturn_t xhci_irq(struct usb_hcd *hcd)
2300{
2301 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2302 u32 status;
2303 union xhci_trb *trb;
2304 u64 temp_64;
2305 union xhci_trb *event_ring_deq;
2306 dma_addr_t deq;
2307
2308 spin_lock(&xhci->lock);
2309 trb = xhci->event_ring->dequeue;
2310 /* Check if the xHC generated the interrupt, or the irq is shared */
2311 status = xhci_readl(xhci, &xhci->op_regs->status);
2312 if (status == 0xffffffff)
2313 goto hw_died;
2314
2315 if (!(status & STS_EINT)) {
2316 spin_unlock(&xhci->lock);
2317 return IRQ_NONE;
2318 }
2319 if (status & STS_FATAL) {
2320 xhci_warn(xhci, "WARNING: Host System Error\n");
2321 xhci_halt(xhci);
2322hw_died:
2323 spin_unlock(&xhci->lock);
2324 return -ESHUTDOWN;
2325 }
2326
2327 /*
2328 * Clear the op reg interrupt status first,
2329 * so we can receive interrupts from other MSI-X interrupters.
2330 * Write 1 to clear the interrupt status.
2331 */
2332 status |= STS_EINT;
2333 xhci_writel(xhci, status, &xhci->op_regs->status);
2334 /* FIXME when MSI-X is supported and there are multiple vectors */
2335 /* Clear the MSI-X event interrupt status */
2336
2337 if (hcd->irq != -1) {
2338 u32 irq_pending;
2339 /* Acknowledge the PCI interrupt */
2340 irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2341 irq_pending |= 0x3;
2342 xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2343 }
2344
2345 if (xhci->xhc_state & XHCI_STATE_DYING) {
2346 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2347 "Shouldn't IRQs be disabled?\n");
2348 /* Clear the event handler busy flag (RW1C);
2349 * the event ring should be empty.
2350 */
2351 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2352 xhci_write_64(xhci, temp_64 | ERST_EHB,
2353 &xhci->ir_set->erst_dequeue);
2354 spin_unlock(&xhci->lock);
2355
2356 return IRQ_HANDLED;
2357 }
2358
2359 event_ring_deq = xhci->event_ring->dequeue;
2360 /* FIXME this should be a delayed service routine
2361 * that clears the EHB.
2362 */
2363 while (xhci_handle_event(xhci) > 0) {}
2364
2365 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2366 /* If necessary, update the HW's version of the event ring deq ptr. */
2367 if (event_ring_deq != xhci->event_ring->dequeue) {
2368 deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2369 xhci->event_ring->dequeue);
2370 if (deq == 0)
2371 xhci_warn(xhci, "WARN something wrong with SW event "
2372 "ring dequeue ptr.\n");
2373 /* Update HC event ring dequeue pointer */
2374 temp_64 &= ERST_PTR_MASK;
2375 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2376 }
2377
2378 /* Clear the event handler busy flag (RW1C); event ring is empty. */
2379 temp_64 |= ERST_EHB;
2380 xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2381
2382 spin_unlock(&xhci->lock);
2383
2384 return IRQ_HANDLED;
2385}
2386
2387irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
2388{
2389 irqreturn_t ret;
2390 struct xhci_hcd *xhci;
2391
2392 xhci = hcd_to_xhci(hcd);
2393 set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
2394 if (xhci->shared_hcd)
2395 set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
2396
2397 ret = xhci_irq(hcd);
2398
2399 return ret;
2400}
2401
2402/**** Endpoint Ring Operations ****/
2403
2404/*
2405 * Generic function for queueing a TRB on a ring.
2406 * The caller must have checked to make sure there's room on the ring.
2407 *
2408 * @more_trbs_coming: Will you enqueue more TRBs before calling
2409 * prepare_transfer()?
2410 */
2411static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2412 bool consumer, bool more_trbs_coming,
2413 u32 field1, u32 field2, u32 field3, u32 field4)
2414{
2415 struct xhci_generic_trb *trb;
2416
2417 trb = &ring->enqueue->generic;
2418 trb->field[0] = cpu_to_le32(field1);
2419 trb->field[1] = cpu_to_le32(field2);
2420 trb->field[2] = cpu_to_le32(field3);
2421 trb->field[3] = cpu_to_le32(field4);
2422 inc_enq(xhci, ring, consumer, more_trbs_coming);
2423}
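/*
 * Usage sketch (illustrative only, assuming the caller has already verified
 * there is room on the ring): queueing a single transfer ring No-op TRB,
 * using the TRB_TR_NOOP type ID from xhci.h, would look like
 *
 *	queue_trb(xhci, ring, false, false, 0, 0, 0,
 *			TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
 */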
2424
2425/*
2426 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2427 * FIXME allocate segments if the ring is full.
2428 */
2429static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2430 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2431{
2432 /* Make sure the endpoint has been added to xHC schedule */
2433 switch (ep_state) {
2434 case EP_STATE_DISABLED:
2435 /*
2436 * USB core changed config/interfaces without notifying us,
2437 * or hardware is reporting the wrong state.
2438 */
2439 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2440 return -ENOENT;
2441 case EP_STATE_ERROR:
2442 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2443 /* FIXME event handling code for error needs to clear it */
2444 /* XXX not sure if this should be -ENOENT or not */
2445 return -EINVAL;
2446 case EP_STATE_HALTED:
2447 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2448 case EP_STATE_STOPPED:
2449 case EP_STATE_RUNNING:
2450 break;
2451 default:
2452 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2453 /*
2454 * FIXME issue Configure Endpoint command to try to get the HC
2455 * back into a known state.
2456 */
2457 return -EINVAL;
2458 }
2459 if (!room_on_ring(xhci, ep_ring, num_trbs)) {
2460 /* FIXME allocate more room */
2461 xhci_err(xhci, "ERROR no room on ep ring\n");
2462 return -ENOMEM;
2463 }
2464
2465 if (enqueue_is_link_trb(ep_ring)) {
2466 struct xhci_ring *ring = ep_ring;
2467 union xhci_trb *next;
2468
2469 next = ring->enqueue;
2470
2471 while (last_trb(xhci, ring, ring->enq_seg, next)) {
2472 /* If we're not dealing with 0.95 hardware,
2473 * clear the chain bit.
2474 */
2475 if (!xhci_link_trb_quirk(xhci))
2476 next->link.control &= cpu_to_le32(~TRB_CHAIN);
2477 else
2478 next->link.control |= cpu_to_le32(TRB_CHAIN);
2479
2480 wmb();
2481 next->link.control ^= cpu_to_le32(TRB_CYCLE);
2482
2483 /* Toggle the cycle bit after the last ring segment. */
2484 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2485 ring->cycle_state = (ring->cycle_state ? 0 : 1);
2486 if (!in_interrupt()) {
2487 xhci_dbg(xhci, "queue_trb: Toggle cycle "
2488 "state for ring %p = %i\n",
2489 ring, (unsigned int)ring->cycle_state);
2490 }
2491 }
2492 ring->enq_seg = ring->enq_seg->next;
2493 ring->enqueue = ring->enq_seg->trbs;
2494 next = ring->enqueue;
2495 }
2496 }
2497
2498 return 0;
2499}
2500
2501static int prepare_transfer(struct xhci_hcd *xhci,
2502 struct xhci_virt_device *xdev,
2503 unsigned int ep_index,
2504 unsigned int stream_id,
2505 unsigned int num_trbs,
2506 struct urb *urb,
2507 unsigned int td_index,
2508 gfp_t mem_flags)
2509{
2510 int ret;
2511 struct urb_priv *urb_priv;
2512 struct xhci_td *td;
2513 struct xhci_ring *ep_ring;
2514 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2515
2516 ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2517 if (!ep_ring) {
2518 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2519 stream_id);
2520 return -EINVAL;
2521 }
2522
2523 ret = prepare_ring(xhci, ep_ring,
2524 le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2525 num_trbs, mem_flags);
2526 if (ret)
2527 return ret;
2528
2529 urb_priv = urb->hcpriv;
2530 td = urb_priv->td[td_index];
2531
2532 INIT_LIST_HEAD(&td->td_list);
2533 INIT_LIST_HEAD(&td->cancelled_td_list);
2534
2535 if (td_index == 0) {
2536 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2537 if (unlikely(ret))
2538 return ret;
2539 }
2540
2541 td->urb = urb;
2542 /* Add this TD to the tail of the endpoint ring's TD list */
2543 list_add_tail(&td->td_list, &ep_ring->td_list);
2544 td->start_seg = ep_ring->enq_seg;
2545 td->first_trb = ep_ring->enqueue;
2546
2547 urb_priv->td[td_index] = td;
2548
2549 return 0;
2550}
2551
2552static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2553{
2554 int num_sgs, num_trbs, running_total, temp, i;
2555 struct scatterlist *sg;
2556
2557 sg = NULL;
2558 num_sgs = urb->num_sgs;
2559 temp = urb->transfer_buffer_length;
2560
2561 xhci_dbg(xhci, "count sg list trbs: \n");
2562 num_trbs = 0;
2563 for_each_sg(urb->sg, sg, num_sgs, i) {
2564 unsigned int previous_total_trbs = num_trbs;
2565 unsigned int len = sg_dma_len(sg);
2566
2567 /* Scatter gather list entries may cross 64KB boundaries */
2568 running_total = TRB_MAX_BUFF_SIZE -
2569 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2570 running_total &= TRB_MAX_BUFF_SIZE - 1;
2571 if (running_total != 0)
2572 num_trbs++;
2573
2574 /* How many more 64KB chunks to transfer, how many more TRBs? */
2575 while (running_total < sg_dma_len(sg) && running_total < temp) {
2576 num_trbs++;
2577 running_total += TRB_MAX_BUFF_SIZE;
2578 }
2579 xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
2580 i, (unsigned long long)sg_dma_address(sg),
2581 len, len, num_trbs - previous_total_trbs);
2582
2583 len = min_t(int, len, temp);
2584 temp -= len;
2585 if (temp == 0)
2586 break;
2587 }
2588 xhci_dbg(xhci, "\n");
2589 if (!in_interrupt())
2590 xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
2591 "num_trbs = %d\n",
2592 urb->ep->desc.bEndpointAddress,
2593 urb->transfer_buffer_length,
2594 num_trbs);
2595 return num_trbs;
2596}
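/*
 * Worked example (illustrative): an sg entry of 10000 bytes whose DMA
 * address ends in 0xff00 has 0x100 bytes left before the next 64KB
 * boundary, so the loop above counts one 256-byte TRB up to the boundary
 * plus one TRB for the remaining 9744 bytes, two TRBs in total.
 */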
2597
2598static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2599{
2600 if (num_trbs != 0)
2601 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2602 "TRBs, %d left\n", __func__,
2603 urb->ep->desc.bEndpointAddress, num_trbs);
2604 if (running_total != urb->transfer_buffer_length)
2605 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2606 "queued %#x (%d), asked for %#x (%d)\n",
2607 __func__,
2608 urb->ep->desc.bEndpointAddress,
2609 running_total, running_total,
2610 urb->transfer_buffer_length,
2611 urb->transfer_buffer_length);
2612}
2613
2614static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
2615 unsigned int ep_index, unsigned int stream_id, int start_cycle,
2616 struct xhci_generic_trb *start_trb)
2617{
2618 /*
2619 * Pass all the TRBs to the hardware at once and make sure this write
2620 * isn't reordered.
2621 */
2622 wmb();
2623 if (start_cycle)
2624 start_trb->field[3] |= cpu_to_le32(start_cycle);
2625 else
2626 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
2627 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
2628}
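/*
 * Example (illustrative): if the ring's cycle state was 0 when the first
 * TRB was queued, the queueing functions wrote that TRB with its cycle
 * bit set to 1 so the hardware would not consume it early; the
 * &= ~TRB_CYCLE above flips the bit back to 0, handing the entire chain
 * of TRBs to the hardware in one step before the doorbell rings.
 */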
2629
2630/*
2631 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
2632 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
2633 * (comprised of sg list entries) can take several service intervals to
2634 * transmit.
2635 */
2636int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2637 struct urb *urb, int slot_id, unsigned int ep_index)
2638{
2639 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
2640 xhci->devs[slot_id]->out_ctx, ep_index);
2641 int xhci_interval;
2642 int ep_interval;
2643
2644 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
2645 ep_interval = urb->interval;
2646 /* Convert to microframes */
2647 if (urb->dev->speed == USB_SPEED_LOW ||
2648 urb->dev->speed == USB_SPEED_FULL)
2649 ep_interval *= 8;
2650 /* FIXME change this to a warning and a suggestion to use the new API
2651 * to set the polling interval (once the API is added).
2652 */
2653 if (xhci_interval != ep_interval) {
2654 if (printk_ratelimit())
2655 dev_dbg(&urb->dev->dev, "Driver uses different interval"
2656 " (%d microframe%s) than xHCI "
2657 "(%d microframe%s)\n",
2658 ep_interval,
2659 ep_interval == 1 ? "" : "s",
2660 xhci_interval,
2661 xhci_interval == 1 ? "" : "s");
2662 urb->interval = xhci_interval;
2663 /* Convert back to frames for LS/FS devices */
2664 if (urb->dev->speed == USB_SPEED_LOW ||
2665 urb->dev->speed == USB_SPEED_FULL)
2666 urb->interval /= 8;
2667 }
2668 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
2669}
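/*
 * Interval example (illustrative): a full-speed interrupt URB submitted
 * with urb->interval = 4 frames is converted to 32 microframes above.
 * If the endpoint context was programmed with a different value, say 16
 * microframes, the xHC's value wins: urb->interval becomes 16 and is
 * then converted back to 2 frames for the full-speed device.
 */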
2670
2671/*
2672 * The TD size is the number of bytes remaining in the TD (including this TRB),
2673 * right shifted by 10.
2674 * It must fit in bits 21:17, so it can't be bigger than 31.
2675 */
2676static u32 xhci_td_remainder(unsigned int remainder)
2677{
2678 u32 max = (1 << (21 - 17 + 1)) - 1;
2679
2680 if ((remainder >> 10) >= max)
2681 return max << 17;
2682 else
2683 return (remainder >> 10) << 17;
2684}
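/*
 * Worked example (illustrative): with 17408 bytes remaining in the TD,
 * 17408 >> 10 = 17 and the function returns 17 << 17. With 40960 bytes
 * remaining, 40960 >> 10 = 40 exceeds the 5-bit maximum of 31, so the
 * field is clamped and 31 << 17 is returned instead.
 */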
2685
2686/*
2687 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
2688 * the TD (*not* including this TRB).
2689 *
2690 * Total TD packet count = total_packet_count =
2691 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
2692 *
2693 * Packets transferred up to and including this TRB = packets_transferred =
2694 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
2695 *
2696 * TD size = total_packet_count - packets_transferred
2697 *
2698 * It must fit in bits 21:17, so it can't be bigger than 31.
2699 */
2700
2701static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2702 unsigned int total_packet_count, struct urb *urb)
2703{
2704 int packets_transferred;
2705
2706 /* One TRB with a zero-length data packet. */
2707 if (running_total == 0 && trb_buff_len == 0)
2708 return 0;
2709
2710 /* All the TRB queueing functions don't count the current TRB in
2711 * running_total.
2712 */
2713 packets_transferred = (running_total + trb_buff_len) /
2714 le16_to_cpu(urb->ep->desc.wMaxPacketSize);
2715
2716 return min_t(int, total_packet_count - packets_transferred, 31) << 17;
2717}
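/*
 * Worked example (illustrative): for a 3000-byte TD on an endpoint with
 * wMaxPacketSize = 512, total_packet_count = DIV_ROUND_UP(3000, 512) = 6.
 * After a first TRB covering 1000 bytes (running_total = 0,
 * trb_buff_len = 1000), packets_transferred = 1000 / 512 = 1, so the
 * TD size field for that TRB is 6 - 1 = 5, encoded as 5 << 17.
 */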
2718
2719static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2720 struct urb *urb, int slot_id, unsigned int ep_index)
2721{
2722 struct xhci_ring *ep_ring;
2723 unsigned int num_trbs;
2724 struct urb_priv *urb_priv;
2725 struct xhci_td *td;
2726 struct scatterlist *sg;
2727 int num_sgs;
2728 int trb_buff_len, this_sg_len, running_total;
2729 unsigned int total_packet_count;
2730 bool first_trb;
2731 u64 addr;
2732 bool more_trbs_coming;
2733
2734 struct xhci_generic_trb *start_trb;
2735 int start_cycle;
2736
2737 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2738 if (!ep_ring)
2739 return -EINVAL;
2740
2741 num_trbs = count_sg_trbs_needed(xhci, urb);
2742 num_sgs = urb->num_sgs;
2743 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
2744 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2745
2746 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
2747 ep_index, urb->stream_id,
2748 num_trbs, urb, 0, mem_flags);
2749 if (trb_buff_len < 0)
2750 return trb_buff_len;
2751
2752 urb_priv = urb->hcpriv;
2753 td = urb_priv->td[0];
2754
2755 /*
2756 * Don't give the first TRB to the hardware (by toggling the cycle bit)
2757 * until we've finished creating all the other TRBs. The ring's cycle
2758 * state may change as we enqueue the other TRBs, so save it too.
2759 */
2760 start_trb = &ep_ring->enqueue->generic;
2761 start_cycle = ep_ring->cycle_state;
2762
2763 running_total = 0;
2764 /*
2765 * How much data is in the first TRB?
2766 *
2767 * There are three forces at work for TRB buffer pointers and lengths:
2768 * 1. We don't want to walk off the end of this sg-list entry buffer.
2769 * 2. The transfer length that the driver requested may be smaller than
2770 * the amount of memory allocated for this scatter-gather list.
2771 * 3. TRBs buffers can't cross 64KB boundaries.
2772 */
2773 sg = urb->sg;
2774 addr = (u64) sg_dma_address(sg);
2775 this_sg_len = sg_dma_len(sg);
2776 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2777 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2778 if (trb_buff_len > urb->transfer_buffer_length)
2779 trb_buff_len = urb->transfer_buffer_length;
2780 xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
2781 trb_buff_len);
2782
2783 first_trb = true;
2784 /* Queue the first TRB, even if it's zero-length */
2785 do {
2786 u32 field = 0;
2787 u32 length_field = 0;
2788 u32 remainder = 0;
2789
2790 /* Don't change the cycle bit of the first TRB until later */
2791 if (first_trb) {
2792 first_trb = false;
2793 if (start_cycle == 0)
2794 field |= 0x1;
2795 } else
2796 field |= ep_ring->cycle_state;
2797
2798 /* Chain all the TRBs together; clear the chain bit in the last
2799 * TRB to indicate it's the last TRB in the chain.
2800 */
2801 if (num_trbs > 1) {
2802 field |= TRB_CHAIN;
2803 } else {
2804 /* FIXME - add check for ZERO_PACKET flag before this */
2805 td->last_trb = ep_ring->enqueue;
2806 field |= TRB_IOC;
2807 }
2808
2809 /* Only set interrupt on short packet for IN endpoints */
2810 if (usb_urb_dir_in(urb))
2811 field |= TRB_ISP;
2812
2813 xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
2814 "64KB boundary at %#x, end dma = %#x\n",
2815 (unsigned int) addr, trb_buff_len, trb_buff_len,
2816 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2817 (unsigned int) addr + trb_buff_len);
2818 if (TRB_MAX_BUFF_SIZE -
2819 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
2820 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2821 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2822 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2823 (unsigned int) addr + trb_buff_len);
2824 }
2825
2826 /* Set the TRB length, TD size, and interrupter fields. */
2827 if (xhci->hci_version < 0x100) {
2828 remainder = xhci_td_remainder(
2829 urb->transfer_buffer_length -
2830 running_total);
2831 } else {
2832 remainder = xhci_v1_0_td_remainder(running_total,
2833 trb_buff_len, total_packet_count, urb);
2834 }
2835 length_field = TRB_LEN(trb_buff_len) |
2836 remainder |
2837 TRB_INTR_TARGET(0);
2838
2839 if (num_trbs > 1)
2840 more_trbs_coming = true;
2841 else
2842 more_trbs_coming = false;
2843 queue_trb(xhci, ep_ring, false, more_trbs_coming,
2844 lower_32_bits(addr),
2845 upper_32_bits(addr),
2846 length_field,
2847 field | TRB_TYPE(TRB_NORMAL));
2848 --num_trbs;
2849 running_total += trb_buff_len;
2850
2851 /* Calculate length for next transfer --
2852 * Are we done queueing all the TRBs for this sg entry?
2853 */
2854 this_sg_len -= trb_buff_len;
2855 if (this_sg_len == 0) {
2856 --num_sgs;
2857 if (num_sgs == 0)
2858 break;
2859 sg = sg_next(sg);
2860 addr = (u64) sg_dma_address(sg);
2861 this_sg_len = sg_dma_len(sg);
2862 } else {
2863 addr += trb_buff_len;
2864 }
2865
2866 trb_buff_len = TRB_MAX_BUFF_SIZE -
2867 (addr & (TRB_MAX_BUFF_SIZE - 1));
2868 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2869 if (running_total + trb_buff_len > urb->transfer_buffer_length)
2870 trb_buff_len =
2871 urb->transfer_buffer_length - running_total;
2872 } while (running_total < urb->transfer_buffer_length);
2873
2874 check_trb_math(urb, num_trbs, running_total);
2875 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
2876 start_cycle, start_trb);
2877 return 0;
2878}
2879
2880/* This is very similar to what ehci-q.c qtd_fill() does */
2881int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2882 struct urb *urb, int slot_id, unsigned int ep_index)
2883{
2884 struct xhci_ring *ep_ring;
2885 struct urb_priv *urb_priv;
2886 struct xhci_td *td;
2887 int num_trbs;
2888 struct xhci_generic_trb *start_trb;
2889 bool first_trb;
2890 bool more_trbs_coming;
2891 int start_cycle;
2892 u32 field, length_field;
2893
2894 int running_total, trb_buff_len, ret;
2895 unsigned int total_packet_count;
2896 u64 addr;
2897
2898 if (urb->num_sgs)
2899 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
2900
2901 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
2902 if (!ep_ring)
2903 return -EINVAL;
2904
2905 num_trbs = 0;
2906 /* How much data is (potentially) left before the 64KB boundary? */
2907 running_total = TRB_MAX_BUFF_SIZE -
2908 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2909 running_total &= TRB_MAX_BUFF_SIZE - 1;
2910
2911 /* If there's some data on this 64KB chunk, or we have to send a
2912 * zero-length transfer, we need at least one TRB
2913 */
2914 if (running_total != 0 || urb->transfer_buffer_length == 0)
2915 num_trbs++;
2916 /* How many more 64KB chunks to transfer, how many more TRBs? */
2917 while (running_total < urb->transfer_buffer_length) {
2918 num_trbs++;
2919 running_total += TRB_MAX_BUFF_SIZE;
2920 }
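/*
 * Counting example (illustrative): a 32-byte transfer whose transfer_dma
 * ends in 0xfff0 leaves 16 bytes before the 64KB boundary, so the first
 * TRB covers those 16 bytes and a second TRB covers the remaining 16,
 * leaving num_trbs at 2.
 */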
2921 /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
2922
2923 if (!in_interrupt())
2924 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
2925 "addr = %#llx, num_trbs = %d\n",
2926 urb->ep->desc.bEndpointAddress,
2927 urb->transfer_buffer_length,
2928 urb->transfer_buffer_length,
2929 (unsigned long long)urb->transfer_dma,
2930 num_trbs);
2931
2932 ret = prepare_transfer(xhci, xhci->devs[slot_id],
2933 ep_index, urb->stream_id,
2934 num_trbs, urb, 0, mem_flags);
2935 if (ret < 0)
2936 return ret;
2937
2938 urb_priv = urb->hcpriv;
2939 td = urb_priv->td[0];
2940
2941 /*
2942 * Don't give the first TRB to the hardware (by toggling the cycle bit)
2943 * until we've finished creating all the other TRBs. The ring's cycle
2944 * state may change as we enqueue the other TRBs, so save it too.
2945 */
2946 start_trb = &ep_ring->enqueue->generic;
2947 start_cycle = ep_ring->cycle_state;
2948
2949 running_total = 0;
2950 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
2951 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
2952 /* How much data is in the first TRB? */
2953 addr = (u64) urb->transfer_dma;
2954 trb_buff_len = TRB_MAX_BUFF_SIZE -
2955 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2956 if (trb_buff_len > urb->transfer_buffer_length)
2957 trb_buff_len = urb->transfer_buffer_length;
2958
2959 first_trb = true;
2960
2961 /* Queue the first TRB, even if it's zero-length */
2962 do {
2963 u32 remainder = 0;
2964 field = 0;
2965
2966 /* Don't change the cycle bit of the first TRB until later */
2967 if (first_trb) {
2968 first_trb = false;
2969 if (start_cycle == 0)
2970 field |= 0x1;
2971 } else
2972 field |= ep_ring->cycle_state;
2973
2974 /* Chain all the TRBs together; clear the chain bit in the last
2975 * TRB to indicate it's the last TRB in the chain.
2976 */
2977 if (num_trbs > 1) {
2978 field |= TRB_CHAIN;
2979 } else {
2980 /* FIXME - add check for ZERO_PACKET flag before this */
2981 td->last_trb = ep_ring->enqueue;
2982 field |= TRB_IOC;
2983 }
2984
2985 /* Only set interrupt on short packet for IN endpoints */
2986 if (usb_urb_dir_in(urb))
2987 field |= TRB_ISP;
2988
2989 /* Set the TRB length, TD size, and interrupter fields. */
2990 if (xhci->hci_version < 0x100) {
2991 remainder = xhci_td_remainder(
2992 urb->transfer_buffer_length -
2993 running_total);
2994 } else {
2995 remainder = xhci_v1_0_td_remainder(running_total,
2996 trb_buff_len, total_packet_count, urb);
2997 }
2998 length_field = TRB_LEN(trb_buff_len) |
2999 remainder |
3000 TRB_INTR_TARGET(0);
3001
3002 if (num_trbs > 1)
3003 more_trbs_coming = true;
3004 else
3005 more_trbs_coming = false;
3006 queue_trb(xhci, ep_ring, false, more_trbs_coming,
3007 lower_32_bits(addr),
3008 upper_32_bits(addr),
3009 length_field,
3010 field | TRB_TYPE(TRB_NORMAL));
3011 --num_trbs;
3012 running_total += trb_buff_len;
3013
3014 /* Calculate length for next transfer */
3015 addr += trb_buff_len;
3016 trb_buff_len = urb->transfer_buffer_length - running_total;
3017 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3018 trb_buff_len = TRB_MAX_BUFF_SIZE;
3019 } while (running_total < urb->transfer_buffer_length);
3020
3021 check_trb_math(urb, num_trbs, running_total);
3022 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3023 start_cycle, start_trb);
3024 return 0;
3025}
3026
3027/* Caller must have locked xhci->lock */
3028int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3029 struct urb *urb, int slot_id, unsigned int ep_index)
3030{
3031 struct xhci_ring *ep_ring;
3032 int num_trbs;
3033 int ret;
3034 struct usb_ctrlrequest *setup;
3035 struct xhci_generic_trb *start_trb;
3036 int start_cycle;
3037 u32 field, length_field;
3038 struct urb_priv *urb_priv;
3039 struct xhci_td *td;
3040
3041 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3042 if (!ep_ring)
3043 return -EINVAL;
3044
3045 /*
3046 * Need to copy setup packet into setup TRB, so we can't use the setup
3047 * DMA address.
3048 */
3049 if (!urb->setup_packet)
3050 return -EINVAL;
3051
3052 if (!in_interrupt())
3053 xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
3054 slot_id, ep_index);
3055 /* 1 TRB for setup, 1 for status */
3056 num_trbs = 2;
3057 /*
3058 * Don't need to check if we need additional event data and normal TRBs,
3059 * since data in control transfers will never get bigger than 16MB
3060 * XXX: can we get a buffer that crosses 64KB boundaries?
3061 */
3062 if (urb->transfer_buffer_length > 0)
3063 num_trbs++;
3064 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3065 ep_index, urb->stream_id,
3066 num_trbs, urb, 0, mem_flags);
3067 if (ret < 0)
3068 return ret;
3069
3070 urb_priv = urb->hcpriv;
3071 td = urb_priv->td[0];
3072
3073 /*
3074 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3075 * until we've finished creating all the other TRBs. The ring's cycle
3076 * state may change as we enqueue the other TRBs, so save it too.
3077 */
3078 start_trb = &ep_ring->enqueue->generic;
3079 start_cycle = ep_ring->cycle_state;
3080
3081 /* Queue setup TRB - see section 6.4.1.2.1 */
3082 /* FIXME better way to translate setup_packet into two u32 fields? */
3083 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3084 field = 0;
3085 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3086 if (start_cycle == 0)
3087 field |= 0x1;
3088
3089 /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3090 if (xhci->hci_version == 0x100) {
3091 if (urb->transfer_buffer_length > 0) {
3092 if (setup->bRequestType & USB_DIR_IN)
3093 field |= TRB_TX_TYPE(TRB_DATA_IN);
3094 else
3095 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3096 }
3097 }
3098
3099 queue_trb(xhci, ep_ring, false, true,
3100 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3101 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3102 TRB_LEN(8) | TRB_INTR_TARGET(0),
3103 /* Immediate data in pointer */
3104 field);
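/*
 * Packing example (illustrative): a standard GET_DESCRIPTOR(device)
 * setup packet (bRequestType 0x80, bRequest 0x06, wValue 0x0100,
 * wIndex 0, wLength 18) lands in the TRB above as
 * field[0] = 0x01000680 and field[1] = 0x00120000, with TRB_LEN(8)
 * because a setup packet is always eight bytes of immediate data.
 */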
3105
3106 /* If there's data, queue data TRBs */
3107 /* Only set interrupt on short packet for IN endpoints */
3108 if (usb_urb_dir_in(urb))
3109 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3110 else
3111 field = TRB_TYPE(TRB_DATA);
3112
3113 length_field = TRB_LEN(urb->transfer_buffer_length) |
3114 xhci_td_remainder(urb->transfer_buffer_length) |
3115 TRB_INTR_TARGET(0);
3116 if (urb->transfer_buffer_length > 0) {
3117 if (setup->bRequestType & USB_DIR_IN)
3118 field |= TRB_DIR_IN;
3119 queue_trb(xhci, ep_ring, false, true,
3120 lower_32_bits(urb->transfer_dma),
3121 upper_32_bits(urb->transfer_dma),
3122 length_field,
3123 field | ep_ring->cycle_state);
3124 }
3125
3126 /* Save the DMA address of the last TRB in the TD */
3127 td->last_trb = ep_ring->enqueue;
3128
3129 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3130 /* If the device sent data, the status stage is an OUT transfer */
3131 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3132 field = 0;
3133 else
3134 field = TRB_DIR_IN;
3135 queue_trb(xhci, ep_ring, false, false,
3136 0,
3137 0,
3138 TRB_INTR_TARGET(0),
3139 /* Event on completion */
3140 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3141
3142 giveback_first_trb(xhci, slot_id, ep_index, 0,
3143 start_cycle, start_trb);
3144 return 0;
3145}
3146
3147static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3148 struct urb *urb, int i)
3149{
3150 int num_trbs = 0;
3151 u64 addr, td_len;
3152
3153 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3154 td_len = urb->iso_frame_desc[i].length;
3155
3156 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3157 TRB_MAX_BUFF_SIZE);
3158 if (num_trbs == 0)
3159 num_trbs++;
3160
3161 return num_trbs;
3162}
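/*
 * Worked example (illustrative): a 6000-byte isoc frame whose buffer
 * starts 61440 bytes (0xf000) into a 64KB region needs
 * DIV_ROUND_UP(6000 + 61440, 65536) = 2 TRBs: one up to the boundary
 * and one for the rest.
 */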
3163
3164/*
3165 * The transfer burst count field of the isochronous TRB defines the number of
3166 * bursts that are required to move all packets in this TD. Only SuperSpeed
3167 * devices can burst up to bMaxBurst number of packets per service interval.
3168 * This field is zero based, meaning a value of zero in the field means one
3169 * burst. Basically, for everything but SuperSpeed devices, this field will be
3170 * zero. Only xHCI 1.0 host controllers support this field.
3171 */
3172static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3173 struct usb_device *udev,
3174 struct urb *urb, unsigned int total_packet_count)
3175{
3176 unsigned int max_burst;
3177
3178 if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3179 return 0;
3180
3181 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3182 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3183}
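/*
 * Worked example (illustrative): a SuperSpeed endpoint with bMaxBurst = 3
 * moves up to 4 packets per burst, so a 10-packet TD needs
 * DIV_ROUND_UP(10, 4) = 3 bursts, and the zero-based field is 2.
 */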
3184
3185/*
3186 * Returns the number of packets in the last "burst" of packets. This field is
3187 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3188 * the last burst packet count is equal to the total number of packets in the
3189 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3190 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3191 * contain 1 to (bMaxBurst + 1) packets.
3192 */
3193static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3194 struct usb_device *udev,
3195 struct urb *urb, unsigned int total_packet_count)
3196{
3197 unsigned int max_burst;
3198 unsigned int residue;
3199
3200 if (xhci->hci_version < 0x100)
3201 return 0;
3202
3203 switch (udev->speed) {
3204 case USB_SPEED_SUPER:
3205 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3206 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3207 residue = total_packet_count % (max_burst + 1);
3208 /* If residue is zero, the last burst contains (max_burst + 1)
3209 * number of packets, but the TLBPC field is zero-based.
3210 */
3211 if (residue == 0)
3212 return max_burst;
3213 return residue - 1;
3214 default:
3215 if (total_packet_count == 0)
3216 return 0;
3217 return total_packet_count - 1;
3218 }
3219}
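/*
 * Worked example (illustrative): continuing the 10-packet TD with
 * bMaxBurst = 3, residue = 10 % 4 = 2, so the last burst holds 2 packets
 * and the zero-based value returned is 1. If the TD had 8 packets,
 * residue would be 0 and the function would return max_burst, meaning a
 * full final burst of 4 packets.
 */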
3220
3221/* This is for isoc transfer */
3222static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3223 struct urb *urb, int slot_id, unsigned int ep_index)
3224{
3225 struct xhci_ring *ep_ring;
3226 struct urb_priv *urb_priv;
3227 struct xhci_td *td;
3228 int num_tds, trbs_per_td;
3229 struct xhci_generic_trb *start_trb;
3230 bool first_trb;
3231 int start_cycle;
3232 u32 field, length_field;
3233 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3234 u64 start_addr, addr;
3235 int i, j;
3236 bool more_trbs_coming;
3237
3238 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3239
3240 num_tds = urb->number_of_packets;
3241 if (num_tds < 1) {
3242 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3243 return -EINVAL;
3244 }
3245
3246 if (!in_interrupt())
3247 xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
3248 " addr = %#llx, num_tds = %d\n",
3249 urb->ep->desc.bEndpointAddress,
3250 urb->transfer_buffer_length,
3251 urb->transfer_buffer_length,
3252 (unsigned long long)urb->transfer_dma,
3253 num_tds);
3254
3255 start_addr = (u64) urb->transfer_dma;
3256 start_trb = &ep_ring->enqueue->generic;
3257 start_cycle = ep_ring->cycle_state;
3258
3259 urb_priv = urb->hcpriv;
3260 /* Queue the first TRB, even if it's zero-length */
3261 for (i = 0; i < num_tds; i++) {
3262 unsigned int total_packet_count;
3263 unsigned int burst_count;
3264 unsigned int residue;
3265
3266 first_trb = true;
3267 running_total = 0;
3268 addr = start_addr + urb->iso_frame_desc[i].offset;
3269 td_len = urb->iso_frame_desc[i].length;
3270 td_remain_len = td_len;
3271 total_packet_count = DIV_ROUND_UP(td_len,
3272 le16_to_cpu(urb->ep->desc.wMaxPacketSize));
3273 /* A zero-length transfer still involves at least one packet. */
3274 if (total_packet_count == 0)
3275 total_packet_count++;
3276 burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3277 total_packet_count);
3278 residue = xhci_get_last_burst_packet_count(xhci,
3279 urb->dev, urb, total_packet_count);
3280
3281 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3282
3283 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3284 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3285 if (ret < 0) {
3286 if (i == 0)
3287 return ret;
3288 goto cleanup;
3289 }
3290
3291 td = urb_priv->td[i];
3292 for (j = 0; j < trbs_per_td; j++) {
3293 u32 remainder = 0;
3294 field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
3295
3296 if (first_trb) {
3297 /* Queue the isoc TRB */
3298 field |= TRB_TYPE(TRB_ISOC);
3299 /* Assume URB_ISO_ASAP is set */
3300 field |= TRB_SIA;
3301 if (i == 0) {
3302 if (start_cycle == 0)
3303 field |= 0x1;
3304 } else
3305 field |= ep_ring->cycle_state;
3306 first_trb = false;
3307 } else {
3308 /* Queue other normal TRBs */
3309 field |= TRB_TYPE(TRB_NORMAL);
3310 field |= ep_ring->cycle_state;
3311 }
3312
3313 /* Only set interrupt on short packet for IN EPs */
3314 if (usb_urb_dir_in(urb))
3315 field |= TRB_ISP;
3316
3317 /* Chain all the TRBs together; clear the chain bit in
3318 * the last TRB to indicate it's the last TRB in the
3319 * chain.
3320 */
3321 if (j < trbs_per_td - 1) {
3322 field |= TRB_CHAIN;
3323 more_trbs_coming = true;
3324 } else {
3325 td->last_trb = ep_ring->enqueue;
3326 field |= TRB_IOC;
3327 if (xhci->hci_version == 0x100) {
3328 /* Set BEI bit except for the last td */
3329 if (i < num_tds - 1)
3330 field |= TRB_BEI;
3331 }
3332 more_trbs_coming = false;
3333 }
3334
3335 /* Calculate TRB length */
3336 trb_buff_len = TRB_MAX_BUFF_SIZE -
3337 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3338 if (trb_buff_len > td_remain_len)
3339 trb_buff_len = td_remain_len;
3340
3341 /* Set the TRB length, TD size, & interrupter fields. */
3342 if (xhci->hci_version < 0x100) {
3343 remainder = xhci_td_remainder(
3344 td_len - running_total);
3345 } else {
3346 remainder = xhci_v1_0_td_remainder(
3347 running_total, trb_buff_len,
3348 total_packet_count, urb);
3349 }
3350 length_field = TRB_LEN(trb_buff_len) |
3351 remainder |
3352 TRB_INTR_TARGET(0);
3353
3354 queue_trb(xhci, ep_ring, false, more_trbs_coming,
3355 lower_32_bits(addr),
3356 upper_32_bits(addr),
3357 length_field,
3358 field);
3359 running_total += trb_buff_len;
3360
3361 addr += trb_buff_len;
3362 td_remain_len -= trb_buff_len;
3363 }
3364
3365 /* Check TD length */
3366 if (running_total != td_len) {
3367 xhci_err(xhci, "ISOC TD length mismatch\n");
3368 ret = -EINVAL;
 goto cleanup;
3369 }
3370 }
3371
3372 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3373 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3374 usb_amd_quirk_pll_disable();
3375 }
3376 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3377
3378 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3379 start_cycle, start_trb);
3380 return 0;
3381cleanup:
3382 /* Clean up a partially enqueued isoc transfer. */
3383
3384 for (i--; i >= 0; i--)
3385 list_del_init(&urb_priv->td[i]->td_list);
3386
3387 /* Use the first TD as a temporary variable to turn the TDs we've queued
3388 * into No-ops with a software-owned cycle bit. That way the hardware
3389 * won't accidentally start executing bogus TDs when we partially
3390 * overwrite them. td->first_trb and td->start_seg are already set.
3391 */
3392 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3393 /* Every TRB except the first & last will have its cycle bit flipped. */
3394 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3395
3396 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3397 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3398 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3399 ep_ring->cycle_state = start_cycle;
3400 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3401 return ret;
3402}
3403
3404/*
3405 * Check the transfer ring to guarantee there is enough room for the URB.
3406 * Update the ISO URB's start_frame and interval.
3407 * Update the interval as xhci_queue_intr_tx does. For now, just use the
3408 * xHCI frame_index to update urb->start_frame.
3409 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
3410 */
3411int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3412 struct urb *urb, int slot_id, unsigned int ep_index)
3413{
3414 struct xhci_virt_device *xdev;
3415 struct xhci_ring *ep_ring;
3416 struct xhci_ep_ctx *ep_ctx;
3417 int start_frame;
3418 int xhci_interval;
3419 int ep_interval;
3420 int num_tds, num_trbs, i;
3421 int ret;
3422
3423 xdev = xhci->devs[slot_id];
3424 ep_ring = xdev->eps[ep_index].ring;
3425 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3426
3427 num_trbs = 0;
3428 num_tds = urb->number_of_packets;
3429 for (i = 0; i < num_tds; i++)
3430 num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3431
3432 /* Check the ring to guarantee there is enough room for the whole URB.
3433 * Do not insert any TD of the URB into the ring if the check fails.
3434 */
3435 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3436 num_trbs, mem_flags);
3437 if (ret)
3438 return ret;
3439
3440 start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3441 start_frame &= 0x3fff;
3442
3443 urb->start_frame = start_frame;
3444 if (urb->dev->speed == USB_SPEED_LOW ||
3445 urb->dev->speed == USB_SPEED_FULL)
3446 urb->start_frame >>= 3;
3447
3448 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3449 ep_interval = urb->interval;
3450 /* Convert to microframes */
3451 if (urb->dev->speed == USB_SPEED_LOW ||
3452 urb->dev->speed == USB_SPEED_FULL)
3453 ep_interval *= 8;
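 /* Example: a full-speed endpoint with urb->interval = 4 frames
 * becomes 32 microframes here, since LS/FS intervals are kept in
 * 1 ms frames while the endpoint context uses 125 us microframes.
 */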
3454 /* FIXME change this to a warning and a suggestion to use the new API
3455 * to set the polling interval (once the API is added).
3456 */
3457 if (xhci_interval != ep_interval) {
3458 if (printk_ratelimit())
3459 dev_dbg(&urb->dev->dev, "Driver uses different interval"
3460 " (%d microframe%s) than xHCI "
3461 "(%d microframe%s)\n",
3462 ep_interval,
3463 ep_interval == 1 ? "" : "s",
3464 xhci_interval,
3465 xhci_interval == 1 ? "" : "s");
3466 urb->interval = xhci_interval;
3467 /* Convert back to frames for LS/FS devices */
3468 if (urb->dev->speed == USB_SPEED_LOW ||
3469 urb->dev->speed == USB_SPEED_FULL)
3470 urb->interval /= 8;
3471 }
3472 return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
3473}
3474
3475/**** Command Ring Operations ****/
3476
3477/* Generic function for queueing a command TRB on the command ring.
3478 * Check to make sure there's room on the command ring for one command TRB.
3479 * Also check that there's room reserved for commands that must not fail.
3480 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3481 * then only check for the number of reserved spots.
3482 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3483 * because the command event handler may want to resubmit a failed command.
3484 */
3485static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3486 u32 field3, u32 field4, bool command_must_succeed)
3487{
3488 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3489 int ret;
3490
3491 if (!command_must_succeed)
3492 reserved_trbs++;
3493
3494 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3495 reserved_trbs, GFP_ATOMIC);
3496 if (ret < 0) {
3497 xhci_err(xhci, "ERR: No room for command on command ring\n");
3498 if (command_must_succeed)
3499 xhci_err(xhci, "ERR: Reserved TRB counting for "
3500 "unfailable commands failed.\n");
3501 return ret;
3502 }
3503 queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
3504 field4 | xhci->cmd_ring->cycle_state);
3505 return 0;
3506}
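/*
 * A minimal usage sketch: a command no-op TRB could be queued as
 *	queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
 * followed by xhci_ring_cmd_db() so the xHC starts processing it. The
 * wrappers below fill in the four fields for each real command type.
 */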
3507
3508/* Queue a slot enable or disable request on the command ring */
3509int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
3510{
3511 return queue_command(xhci, 0, 0, 0,
3512 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3513}
3514
3515/* Queue an address device command TRB */
3516int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3517 u32 slot_id)
3518{
3519 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3520 upper_32_bits(in_ctx_ptr), 0,
3521 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
3522 false);
3523}
3524
3525int xhci_queue_vendor_command(struct xhci_hcd *xhci,
3526 u32 field1, u32 field2, u32 field3, u32 field4)
3527{
3528 return queue_command(xhci, field1, field2, field3, field4, false);
3529}
3530
3531/* Queue a reset device command TRB */
3532int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
3533{
3534 return queue_command(xhci, 0, 0, 0,
3535 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3536 false);
3537}
3538
3539/* Queue a configure endpoint command TRB */
3540int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3541 u32 slot_id, bool command_must_succeed)
3542{
3543 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3544 upper_32_bits(in_ctx_ptr), 0,
3545 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3546 command_must_succeed);
3547}
3548
3549/* Queue an evaluate context command TRB */
3550int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3551 u32 slot_id)
3552{
3553 return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3554 upper_32_bits(in_ctx_ptr), 0,
3555 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
3556 false);
3557}
3558
3559/*
3560 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3561 * activity on an endpoint that is about to be suspended.
3562 */
3563int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3564 unsigned int ep_index, int suspend)
3565{
3566 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3567 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3568 u32 type = TRB_TYPE(TRB_STOP_RING);
3569 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3570
3571 return queue_command(xhci, 0, 0, 0,
3572 trb_slot_id | trb_ep_index | type | trb_suspend, false);
3573}
3574
3575/* Set Transfer Ring Dequeue Pointer command.
3576 * This should not be used for endpoints that have streams enabled.
3577 */
3578static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
3579 unsigned int ep_index, unsigned int stream_id,
3580 struct xhci_segment *deq_seg,
3581 union xhci_trb *deq_ptr, u32 cycle_state)
3582{
3583 dma_addr_t addr;
3584 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3585 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3586 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
3587 u32 type = TRB_TYPE(TRB_SET_DEQ);
3588 struct xhci_virt_ep *ep;
3589
3590 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
3591 if (addr == 0) {
3592 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3593 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3594 deq_seg, deq_ptr);
3595 return 0;
3596 }
3597 ep = &xhci->devs[slot_id]->eps[ep_index];
3598 if ((ep->ep_state & SET_DEQ_PENDING)) {
3599 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3600 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
3601 return 0;
3602 }
3603 ep->queued_deq_seg = deq_seg;
3604 ep->queued_deq_ptr = deq_ptr;
3605 return queue_command(xhci, lower_32_bits(addr) | cycle_state,
3606 upper_32_bits(addr), trb_stream_id,
3607 trb_slot_id | trb_ep_index | type, false);
3608}
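/*
 * The queued_deq_seg/queued_deq_ptr saved above are compared with the
 * endpoint or stream context when the Set TR Deq Ptr command completes,
 * and SET_DEQ_PENDING keeps the endpoint doorbell quiet until then.
 */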
3609
3610int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
3611 unsigned int ep_index)
3612{
3613 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3614 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3615 u32 type = TRB_TYPE(TRB_RESET_EP);
3616
3617 return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
3618 false);
3619}
67#include <linux/scatterlist.h>
68#include <linux/slab.h>
69#include "xhci.h"
70#include "xhci-trace.h"
71#include "xhci-mtk.h"
72
73/*
74 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
75 * address of the TRB.
76 */
77dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
78 union xhci_trb *trb)
79{
80 unsigned long segment_offset;
81
82 if (!seg || !trb || trb < seg->trbs)
83 return 0;
84 /* offset in TRBs */
85 segment_offset = trb - seg->trbs;
86 if (segment_offset >= TRBS_PER_SEGMENT)
87 return 0;
88 return seg->dma + (segment_offset * sizeof(*trb));
89}
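/*
 * Example: TRBs are 16 bytes, so for a segment with seg->dma == 0x1000 the
 * third TRB maps to 0x1000 + 2 * sizeof(*trb) == 0x1020. A zero return is
 * how callers detect a TRB that does not belong to this segment.
 */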
90
91/* Does this link TRB point to the first segment in a ring,
92 * or was the previous TRB the last TRB on the last segment in the ERST?
93 */
94static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
95 struct xhci_segment *seg, union xhci_trb *trb)
96{
97 if (ring == xhci->event_ring)
98 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
99 (seg->next == xhci->event_ring->first_seg);
100 else
101 return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
102}
103
104/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
105 * segment? I.e. would the updated event TRB pointer step off the end of the
106 * event seg?
107 */
108static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
109 struct xhci_segment *seg, union xhci_trb *trb)
110{
111 if (ring == xhci->event_ring)
112 return trb == &seg->trbs[TRBS_PER_SEGMENT];
113 else
114 return TRB_TYPE_LINK_LE32(trb->link.control);
115}
116
117static int enqueue_is_link_trb(struct xhci_ring *ring)
118{
119 struct xhci_link_trb *link = &ring->enqueue->link;
120 return TRB_TYPE_LINK_LE32(link->control);
121}
122
123/* Updates trb to point to the next TRB in the ring, and updates seg if the next
124 * TRB is in a new segment. This does not skip over link TRBs, and it does not
125 * affect the ring dequeue or enqueue pointers.
126 */
127static void next_trb(struct xhci_hcd *xhci,
128 struct xhci_ring *ring,
129 struct xhci_segment **seg,
130 union xhci_trb **trb)
131{
132 if (last_trb(xhci, ring, *seg, *trb)) {
133 *seg = (*seg)->next;
134 *trb = ((*seg)->trbs);
135 } else {
136 (*trb)++;
137 }
138}
139
140/*
141 * See Cycle bit rules. SW is the consumer for the event ring only.
142 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
143 */
144static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
145{
146 ring->deq_updates++;
147
148 /*
149 * If this is not an event ring, and the dequeue pointer
150 * is not on a link TRB, there is one more usable TRB
151 */
152 if (ring->type != TYPE_EVENT &&
153 !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
154 ring->num_trbs_free++;
155
156 do {
157 /*
158 * Update the dequeue pointer further if that was a link TRB or
159 * we're at the end of an event ring segment (which doesn't have
160 * link TRBs)
161 */
162 if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
163 if (ring->type == TYPE_EVENT &&
164 last_trb_on_last_seg(xhci, ring,
165 ring->deq_seg, ring->dequeue)) {
166 ring->cycle_state ^= 1;
167 }
168 ring->deq_seg = ring->deq_seg->next;
169 ring->dequeue = ring->deq_seg->trbs;
170 } else {
171 ring->dequeue++;
172 }
173 } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
174}
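/*
 * Example: on a two-segment event ring, the dequeue pointer walks segment 0,
 * steps into segment 1 at the segment boundary, and ring->cycle_state is
 * toggled only when it wraps from the last segment back to first_seg, per
 * the consumer cycle bit rules.
 */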
175
176/*
177 * See Cycle bit rules. SW is the consumer for the event ring only.
178 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
179 *
180 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
181 * chain bit is set), then set the chain bit in all the following link TRBs.
182 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
183 * have their chain bit cleared (so that each Link TRB is a separate TD).
184 *
185 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
186 * set, but other sections talk about dealing with the chain bit set. This was
187 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
188 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
189 *
190 * @more_trbs_coming: Will you enqueue more TRBs before calling
191 * prepare_transfer()?
192 */
193static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
194 bool more_trbs_coming)
195{
196 u32 chain;
197 union xhci_trb *next;
198
199 chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
200 /* If this is not an event ring, there is one less usable TRB */
201 if (ring->type != TYPE_EVENT &&
202 !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
203 ring->num_trbs_free--;
204 next = ++(ring->enqueue);
205
206 ring->enq_updates++;
207 /* Update the enqueue pointer further if that was a link TRB or we're at
208 * the end of an event ring segment (which doesn't have link TRBs)
209 */
210 while (last_trb(xhci, ring, ring->enq_seg, next)) {
211 if (ring->type != TYPE_EVENT) {
212 /*
213 * If the caller doesn't plan on enqueueing more
214 * TDs before ringing the doorbell, then we
215 * don't want to give the link TRB to the
216 * hardware just yet. We'll give the link TRB
217 * back in prepare_ring() just before we enqueue
218 * the TD at the top of the ring.
219 */
220 if (!chain && !more_trbs_coming)
221 break;
222
223 /* If we're not dealing with 0.95 hardware or
224 * isoc rings on AMD 0.96 host,
225 * carry over the chain bit of the previous TRB
226 * (which may mean the chain bit is cleared).
227 */
228 if (!(ring->type == TYPE_ISOC &&
229 (xhci->quirks & XHCI_AMD_0x96_HOST))
230 && !xhci_link_trb_quirk(xhci)) {
231 next->link.control &=
232 cpu_to_le32(~TRB_CHAIN);
233 next->link.control |=
234 cpu_to_le32(chain);
235 }
236 /* Give this link TRB to the hardware */
237 wmb();
238 next->link.control ^= cpu_to_le32(TRB_CYCLE);
239
240 /* Toggle the cycle bit after the last ring segment. */
241 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
242 ring->cycle_state ^= 1;
243 }
244 }
245 ring->enq_seg = ring->enq_seg->next;
246 ring->enqueue = ring->enq_seg->trbs;
247 next = ring->enqueue;
248 }
249}
250
251/*
252 * Check to see if there's room to enqueue num_trbs on the ring and make sure
253 * enqueue pointer will not advance into dequeue segment. See rules above.
254 */
255static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
256 unsigned int num_trbs)
257{
258 int num_trbs_in_deq_seg;
259
260 if (ring->num_trbs_free < num_trbs)
261 return 0;
262
263 if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
264 num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
265 if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
266 return 0;
267 }
268
269 return 1;
270}
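/*
 * Note that the dequeue-segment check above is stricter than the plain
 * free-TRB count: it refuses room that would let the enqueue pointer
 * advance into the segment the dequeue pointer currently occupies.
 */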
271
272/* Ring the host controller doorbell after placing a command on the ring */
273void xhci_ring_cmd_db(struct xhci_hcd *xhci)
274{
275 if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
276 return;
277
278 xhci_dbg(xhci, "// Ding dong!\n");
279 writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
280 /* Flush PCI posted writes */
281 readl(&xhci->dba->doorbell[0]);
282}
283
284static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
285{
286 u64 temp_64;
287 int ret;
288
289 xhci_dbg(xhci, "Abort command ring\n");
290
291 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
292 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
293 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
294 &xhci->op_regs->cmd_ring);
295
296 /* Section 4.6.1.2 of xHCI 1.0 spec says software should
297 * time the completion of all xHCI commands, including
298 * the Command Abort operation. If software doesn't see
299 * CRR negated in a timely manner (e.g. longer than 5
300 * seconds), then it should assume that there are
301 * larger problems with the xHC and assert HCRST.
302 */
303 ret = xhci_handshake(&xhci->op_regs->cmd_ring,
304 CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
305 if (ret < 0) {
306 /* we are about to kill xhci, give it one more chance */
307 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
308 &xhci->op_regs->cmd_ring);
309 udelay(1000);
310 ret = xhci_handshake(&xhci->op_regs->cmd_ring,
311 CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
312 if (ret == 0)
313 return 0;
314
315 xhci_err(xhci, "Stopping the command ring failed, "
316 "maybe the host is dead\n");
317 xhci->xhc_state |= XHCI_STATE_DYING;
318 xhci_quiesce(xhci);
319 xhci_halt(xhci);
320 return -ESHUTDOWN;
321 }
322
323 return 0;
324}
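/*
 * Note: the handshake above may spin for up to five seconds, which is why
 * the command timeout path drops xhci->lock before calling this function.
 */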
325
326void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
327 unsigned int slot_id,
328 unsigned int ep_index,
329 unsigned int stream_id)
330{
331 __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
332 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
333 unsigned int ep_state = ep->ep_state;
334
335 /* Don't ring the doorbell for this endpoint if there are pending
336 * cancellations because we don't want to interrupt processing.
337 * We don't want to restart any stream rings if there's a set dequeue
338 * pointer command pending because the device can choose to start any
339 * stream once the endpoint is on the HW schedule.
340 */
341 if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
342 (ep_state & EP_HALTED))
343 return;
344 writel(DB_VALUE(ep_index, stream_id), db_addr);
345 /* The CPU has better things to do at this point than wait for a
346 * write-posting flush. It'll get there soon enough.
347 */
348}
349
350/* Ring the doorbell for any rings with pending URBs */
351static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
352 unsigned int slot_id,
353 unsigned int ep_index)
354{
355 unsigned int stream_id;
356 struct xhci_virt_ep *ep;
357
358 ep = &xhci->devs[slot_id]->eps[ep_index];
359
360 /* A ring has pending URBs if its TD list is not empty */
361 if (!(ep->ep_state & EP_HAS_STREAMS)) {
362 if (ep->ring && !(list_empty(&ep->ring->td_list)))
363 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
364 return;
365 }
366
367 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
368 stream_id++) {
369 struct xhci_stream_info *stream_info = ep->stream_info;
370 if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
371 xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
372 stream_id);
373 }
374}
375
376static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
377 unsigned int slot_id, unsigned int ep_index,
378 unsigned int stream_id)
379{
380 struct xhci_virt_ep *ep;
381
382 ep = &xhci->devs[slot_id]->eps[ep_index];
383 /* Common case: no streams */
384 if (!(ep->ep_state & EP_HAS_STREAMS))
385 return ep->ring;
386
387 if (stream_id == 0) {
388 xhci_warn(xhci,
389 "WARN: Slot ID %u, ep index %u has streams, "
390 "but URB has no stream ID.\n",
391 slot_id, ep_index);
392 return NULL;
393 }
394
395 if (stream_id < ep->stream_info->num_streams)
396 return ep->stream_info->stream_rings[stream_id];
397
398 xhci_warn(xhci,
399 "WARN: Slot ID %u, ep index %u has "
400 "stream IDs 1 to %u allocated, "
401 "but stream ID %u is requested.\n",
402 slot_id, ep_index,
403 ep->stream_info->num_streams - 1,
404 stream_id);
405 return NULL;
406}
407
408/* Get the right ring for the given URB.
409 * If the endpoint supports streams, boundary check the URB's stream ID.
410 * If the endpoint doesn't support streams, return the singular endpoint ring.
411 */
412static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
413 struct urb *urb)
414{
415 return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
416 xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
417}
418
419/*
420 * Move the xHC's endpoint ring dequeue pointer past cur_td.
421 * Record the new state of the xHC's endpoint ring dequeue segment,
422 * dequeue pointer, and new consumer cycle state in state.
423 * Update our internal representation of the ring's dequeue pointer.
424 *
425 * We do this in three jumps:
426 * - First we update our new ring state to be the same as when the xHC stopped.
427 * - Then we traverse the ring to find the segment that contains
428 * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
429 * any link TRBs with the toggle cycle bit set.
430 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
431 * if we've moved it past a link TRB with the toggle cycle bit set.
432 *
433 * Some of the uses of xhci_generic_trb are grotty, but if they're done
434 * with correct __le32 accesses they should work fine. Only users of this are
435 * in here.
436 */
437void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
438 unsigned int slot_id, unsigned int ep_index,
439 unsigned int stream_id, struct xhci_td *cur_td,
440 struct xhci_dequeue_state *state)
441{
442 struct xhci_virt_device *dev = xhci->devs[slot_id];
443 struct xhci_virt_ep *ep = &dev->eps[ep_index];
444 struct xhci_ring *ep_ring;
445 struct xhci_segment *new_seg;
446 union xhci_trb *new_deq;
447 dma_addr_t addr;
448 u64 hw_dequeue;
449 bool cycle_found = false;
450 bool td_last_trb_found = false;
451
452 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
453 ep_index, stream_id);
454 if (!ep_ring) {
455 xhci_warn(xhci, "WARN can't find new dequeue state "
456 "for invalid stream ID %u.\n",
457 stream_id);
458 return;
459 }
460
461 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
462 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
463 "Finding endpoint context");
464 /* 4.6.9 the css flag is written to the stream context for streams */
465 if (ep->ep_state & EP_HAS_STREAMS) {
466 struct xhci_stream_ctx *ctx =
467 &ep->stream_info->stream_ctx_array[stream_id];
468 hw_dequeue = le64_to_cpu(ctx->stream_ring);
469 } else {
470 struct xhci_ep_ctx *ep_ctx
471 = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
472 hw_dequeue = le64_to_cpu(ep_ctx->deq);
473 }
474
475 new_seg = ep_ring->deq_seg;
476 new_deq = ep_ring->dequeue;
477 state->new_cycle_state = hw_dequeue & 0x1;
478
479 /*
480 * We want to find the pointer, segment and cycle state of the new trb
481 * (the one after current TD's last_trb). We know the cycle state at
482 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
483 * found.
484 */
485 do {
486 if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
487 == (dma_addr_t)(hw_dequeue & ~0xf)) {
488 cycle_found = true;
489 if (td_last_trb_found)
490 break;
491 }
492 if (new_deq == cur_td->last_trb)
493 td_last_trb_found = true;
494
495 if (cycle_found &&
496 TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
497 new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
498 state->new_cycle_state ^= 0x1;
499
500 next_trb(xhci, ep_ring, &new_seg, &new_deq);
501
502 /* Search wrapped around, bail out */
503 if (new_deq == ep->ring->dequeue) {
504 xhci_err(xhci, "Error: Failed finding new dequeue state\n");
505 state->new_deq_seg = NULL;
506 state->new_deq_ptr = NULL;
507 return;
508 }
509
510 } while (!cycle_found || !td_last_trb_found);
511
512 state->new_deq_seg = new_seg;
513 state->new_deq_ptr = new_deq;
514
515 /* Don't update the ring cycle state for the producer (us). */
516 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
517 "Cycle state = 0x%x", state->new_cycle_state);
518
519 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
520 "New dequeue segment = %p (virtual)",
521 state->new_deq_seg);
522 addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
523 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
524 "New dequeue pointer = 0x%llx (DMA)",
525 (unsigned long long) addr);
526}
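/*
 * On success, new_deq_seg/new_deq_ptr/new_cycle_state in state describe
 * where the xHC should resume; they are handed to the hardware with a Set
 * TR Dequeue Pointer command (see queue_set_tr_deq()).
 */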
527
528/* flip_cycle means flip the cycle bit of all but the first and last TRB.
529 * (The last TRB actually points to the ring enqueue pointer, which is not part
530 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
531 */
532static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
533 struct xhci_td *cur_td, bool flip_cycle)
534{
535 struct xhci_segment *cur_seg;
536 union xhci_trb *cur_trb;
537
538 for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
539 true;
540 next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
541 if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
542 /* Unchain any chained Link TRBs, but
543 * leave the pointers intact.
544 */
545 cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
546 /* Flip the cycle bit (link TRBs can't be the first
547 * or last TRB).
548 */
549 if (flip_cycle)
550 cur_trb->generic.field[3] ^=
551 cpu_to_le32(TRB_CYCLE);
552 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
553 "Cancel (unchain) link TRB");
554 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
555 "Address = %p (0x%llx dma); "
556 "in seg %p (0x%llx dma)",
557 cur_trb,
558 (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
559 cur_seg,
560 (unsigned long long)cur_seg->dma);
561 } else {
562 cur_trb->generic.field[0] = 0;
563 cur_trb->generic.field[1] = 0;
564 cur_trb->generic.field[2] = 0;
565 /* Preserve only the cycle bit of this TRB */
566 cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
567 /* Flip the cycle bit except on the first or last TRB */
568 if (flip_cycle && cur_trb != cur_td->first_trb &&
569 cur_trb != cur_td->last_trb)
570 cur_trb->generic.field[3] ^=
571 cpu_to_le32(TRB_CYCLE);
572 cur_trb->generic.field[3] |= cpu_to_le32(
573 TRB_TYPE(TRB_TR_NOOP));
574 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
575 "TRB to noop at offset 0x%llx",
576 (unsigned long long)
577 xhci_trb_virt_to_dma(cur_seg, cur_trb));
578 }
579 if (cur_trb == cur_td->last_trb)
580 break;
581 }
582}
583
584static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
585 struct xhci_virt_ep *ep)
586{
587 ep->ep_state &= ~EP_HALT_PENDING;
588 /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
589 * timer is running on another CPU, we don't decrement stop_cmds_pending
590 * (since we didn't successfully stop the watchdog timer).
591 */
592 if (del_timer(&ep->stop_cmd_timer))
593 ep->stop_cmds_pending--;
594}
595
596/* Must be called with xhci->lock held in interrupt context */
597static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
598 struct xhci_td *cur_td, int status)
599{
600 struct usb_hcd *hcd;
601 struct urb *urb;
602 struct urb_priv *urb_priv;
603
604 urb = cur_td->urb;
605 urb_priv = urb->hcpriv;
606 urb_priv->td_cnt++;
607 hcd = bus_to_hcd(urb->dev->bus);
608
609 /* Only give back the URB when this is the last TD of the URB */
610 if (urb_priv->td_cnt == urb_priv->length) {
611 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
612 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
613 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
614 if (xhci->quirks & XHCI_AMD_PLL_FIX)
615 usb_amd_quirk_pll_enable();
616 }
617 }
618 usb_hcd_unlink_urb_from_ep(hcd, urb);
619
620 spin_unlock(&xhci->lock);
621 usb_hcd_giveback_urb(hcd, urb, status);
622 xhci_urb_free_priv(urb_priv);
623 spin_lock(&xhci->lock);
624 }
625}
626
627/*
628 * When we get a command completion for a Stop Endpoint Command, we need to
629 * unlink any cancelled TDs from the ring. There are two ways to do that:
630 *
631 * 1. If the HW was in the middle of processing the TD that needs to be
632 * cancelled, then we must move the ring's dequeue pointer past the last TRB
633 * in the TD with a Set Dequeue Pointer Command.
634 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
635 * bit cleared) so that the HW will skip over them.
636 */
637static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
638 union xhci_trb *trb, struct xhci_event_cmd *event)
639{
640 unsigned int ep_index;
641 struct xhci_ring *ep_ring;
642 struct xhci_virt_ep *ep;
643 struct list_head *entry;
644 struct xhci_td *cur_td = NULL;
645 struct xhci_td *last_unlinked_td;
646
647 struct xhci_dequeue_state deq_state;
648
649 if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
650 if (!xhci->devs[slot_id])
651 xhci_warn(xhci, "Stop endpoint command "
652 "completion for disabled slot %u\n",
653 slot_id);
654 return;
655 }
656
657 memset(&deq_state, 0, sizeof(deq_state));
658 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
659 ep = &xhci->devs[slot_id]->eps[ep_index];
660
661 if (list_empty(&ep->cancelled_td_list)) {
662 xhci_stop_watchdog_timer_in_irq(xhci, ep);
663 ep->stopped_td = NULL;
664 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
665 return;
666 }
667
668 /* Fix up the ep ring first, so HW stops executing cancelled TDs.
669 * We have the xHCI lock, so nothing can modify this list until we drop
670 * it. We're also in the event handler, so we can't get re-interrupted
671 * if another Stop Endpoint command completes.
672 */
673 list_for_each(entry, &ep->cancelled_td_list) {
674 cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
675 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
676 "Removing canceled TD starting at 0x%llx (dma).",
677 (unsigned long long)xhci_trb_virt_to_dma(
678 cur_td->start_seg, cur_td->first_trb));
679 ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
680 if (!ep_ring) {
681 /* This shouldn't happen unless a driver is mucking
682 * with the stream ID after submission. This will
683 * leave the TD on the hardware ring, and the hardware
684 * will try to execute it, and may access a buffer
685 * that has already been freed. In the best case, the
686 * hardware will execute it, and the event handler will
687 * ignore the completion event for that TD, since it was
688 * removed from the td_list for that endpoint. In
689 * short, don't muck with the stream ID after
690 * submission.
691 */
692 xhci_warn(xhci, "WARN Cancelled URB %p "
693 "has invalid stream ID %u.\n",
694 cur_td->urb,
695 cur_td->urb->stream_id);
696 goto remove_finished_td;
697 }
698 /*
699 * If we stopped on the TD we need to cancel, then we have to
700 * move the xHC endpoint ring dequeue pointer past this TD.
701 */
702 if (cur_td == ep->stopped_td)
703 xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
704 cur_td->urb->stream_id,
705 cur_td, &deq_state);
706 else
707 td_to_noop(xhci, ep_ring, cur_td, false);
708remove_finished_td:
709 /*
710 * The event handler won't see a completion for this TD anymore,
711 * so remove it from the endpoint ring's TD list. Keep it in
712 * the cancelled TD list for URB completion later.
713 */
714 list_del_init(&cur_td->td_list);
715 }
716 last_unlinked_td = cur_td;
717 xhci_stop_watchdog_timer_in_irq(xhci, ep);
718
719 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
720 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
721 xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
722 ep->stopped_td->urb->stream_id, &deq_state);
723 xhci_ring_cmd_db(xhci);
724 } else {
725 /* Otherwise ring the doorbell(s) to restart queued transfers */
726 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
727 }
728
729 ep->stopped_td = NULL;
730
731 /*
732 * Drop the lock and complete the URBs in the cancelled TD list.
733 * New TDs to be cancelled might be added to the end of the list before
734 * we can complete all the URBs for the TDs we already unlinked.
735 * So stop when we've completed the URB for the last TD we unlinked.
736 */
737 do {
738 cur_td = list_entry(ep->cancelled_td_list.next,
739 struct xhci_td, cancelled_td_list);
740 list_del_init(&cur_td->cancelled_td_list);
741
742 /* Clean up the cancelled URB */
743 /* Doesn't matter what we pass for status, since the core will
744 * just overwrite it (because the URB has been unlinked).
745 */
746 xhci_giveback_urb_in_irq(xhci, cur_td, 0);
747
748 /* Stop processing the cancelled list if the watchdog timer is
749 * running.
750 */
751 if (xhci->xhc_state & XHCI_STATE_DYING)
752 return;
753 } while (cur_td != last_unlinked_td);
754
755 /* Return to the event handler with xhci->lock re-acquired */
756}
757
758static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
759{
760 struct xhci_td *cur_td;
761
762 while (!list_empty(&ring->td_list)) {
763 cur_td = list_first_entry(&ring->td_list,
764 struct xhci_td, td_list);
765 list_del_init(&cur_td->td_list);
766 if (!list_empty(&cur_td->cancelled_td_list))
767 list_del_init(&cur_td->cancelled_td_list);
768 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
769 }
770}
771
772static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
773 int slot_id, int ep_index)
774{
775 struct xhci_td *cur_td;
776 struct xhci_virt_ep *ep;
777 struct xhci_ring *ring;
778
779 ep = &xhci->devs[slot_id]->eps[ep_index];
780 if ((ep->ep_state & EP_HAS_STREAMS) ||
781 (ep->ep_state & EP_GETTING_NO_STREAMS)) {
782 int stream_id;
783
784 for (stream_id = 0; stream_id < ep->stream_info->num_streams;
785 stream_id++) {
786 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
787 "Killing URBs for slot ID %u, ep index %u, stream %u",
788 slot_id, ep_index, stream_id + 1);
789 xhci_kill_ring_urbs(xhci,
790 ep->stream_info->stream_rings[stream_id]);
791 }
792 } else {
793 ring = ep->ring;
794 if (!ring)
795 return;
796 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
797 "Killing URBs for slot ID %u, ep index %u",
798 slot_id, ep_index);
799 xhci_kill_ring_urbs(xhci, ring);
800 }
801 while (!list_empty(&ep->cancelled_td_list)) {
802 cur_td = list_first_entry(&ep->cancelled_td_list,
803 struct xhci_td, cancelled_td_list);
804 list_del_init(&cur_td->cancelled_td_list);
805 xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
806 }
807}
808
809/* Watchdog timer function for when a stop endpoint command fails to complete.
810 * In this case, we assume the host controller is broken or dying or dead. The
811 * host may still be completing some other events, so we have to be careful to
812 * let the event ring handler and the URB dequeueing/enqueueing functions know
813 * through xhci->state.
814 *
815 * The timer may also fire if the host takes a very long time to respond to the
816 * command, and the stop endpoint command completion handler cannot delete the
817 * timer before the timer function is called. Another endpoint cancellation may
818 * sneak in before the timer function can grab the lock, and that may queue
819 * another stop endpoint command and add the timer back. So we cannot use a
820 * simple flag to say whether there is a pending stop endpoint command for a
821 * particular endpoint.
822 *
823 * Instead we use a combination of that flag and a counter for the number of
824 * pending stop endpoint commands. If the timer is the tail end of the last
825 * stop endpoint command, and the endpoint's command is still pending, we assume
826 * the host is dying.
827 */
828void xhci_stop_endpoint_command_watchdog(unsigned long arg)
829{
830 struct xhci_hcd *xhci;
831 struct xhci_virt_ep *ep;
832 int ret, i, j;
833 unsigned long flags;
834
835 ep = (struct xhci_virt_ep *) arg;
836 xhci = ep->xhci;
837
838 spin_lock_irqsave(&xhci->lock, flags);
839
840 ep->stop_cmds_pending--;
841 if (xhci->xhc_state & XHCI_STATE_DYING) {
842 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
843 "Stop EP timer ran, but another timer marked "
844 "xHCI as DYING, exiting.");
845 spin_unlock_irqrestore(&xhci->lock, flags);
846 return;
847 }
848 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
849 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
850 "Stop EP timer ran, but no command pending, "
851 "exiting.");
852 spin_unlock_irqrestore(&xhci->lock, flags);
853 return;
854 }
855
856 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
857 xhci_warn(xhci, "Assuming host is dying, halting host.\n");
858 /* Oops, HC is dead or dying or at least not responding to the stop
859 * endpoint command.
860 */
861 xhci->xhc_state |= XHCI_STATE_DYING;
862 /* Disable interrupts from the host controller and start halting it */
863 xhci_quiesce(xhci);
864 spin_unlock_irqrestore(&xhci->lock, flags);
865
866 ret = xhci_halt(xhci);
867
868 spin_lock_irqsave(&xhci->lock, flags);
869 if (ret < 0) {
870 /* This is bad; the host is not responding to commands and it's
871 * not allowing itself to be halted. At least interrupts are
872 * disabled. If we call usb_hc_died(), it will attempt to
873 * disconnect all device drivers under this host. Those
874 * disconnect() methods will wait for all URBs to be unlinked,
875 * so we must complete them.
876 */
877 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
878 xhci_warn(xhci, "Completing active URBs anyway.\n");
879 /* We could turn all TDs on the rings to no-ops. This won't
880 * help if the host has cached part of the ring, and is slow if
881 * we want to preserve the cycle bit. Skip it and hope the host
882 * doesn't touch the memory.
883 */
884 }
885 for (i = 0; i < MAX_HC_SLOTS; i++) {
886 if (!xhci->devs[i])
887 continue;
888 for (j = 0; j < 31; j++)
889 xhci_kill_endpoint_urbs(xhci, i, j);
890 }
891 spin_unlock_irqrestore(&xhci->lock, flags);
892 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
893 "Calling usb_hc_died()");
894 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
895 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
896 "xHCI host controller is dead.");
897}
898
899
900static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
901 struct xhci_virt_device *dev,
902 struct xhci_ring *ep_ring,
903 unsigned int ep_index)
904{
905 union xhci_trb *dequeue_temp;
906 int num_trbs_free_temp;
907 bool revert = false;
908
909 num_trbs_free_temp = ep_ring->num_trbs_free;
910 dequeue_temp = ep_ring->dequeue;
911
912 /* If we get two back-to-back stalls, and the first stalled transfer
913 * ends just before a link TRB, the dequeue pointer will be left on
914 * the link TRB by the code in the while loop. So we have to update
915 * the dequeue pointer one segment further, or we'll jump off
916 * the segment into la-la-land.
917 */
918 if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
919 ep_ring->deq_seg = ep_ring->deq_seg->next;
920 ep_ring->dequeue = ep_ring->deq_seg->trbs;
921 }
922
923 while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
924 /* We have more usable TRBs */
925 ep_ring->num_trbs_free++;
926 ep_ring->dequeue++;
927 if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
928 ep_ring->dequeue)) {
929 if (ep_ring->dequeue ==
930 dev->eps[ep_index].queued_deq_ptr)
931 break;
932 ep_ring->deq_seg = ep_ring->deq_seg->next;
933 ep_ring->dequeue = ep_ring->deq_seg->trbs;
934 }
935 if (ep_ring->dequeue == dequeue_temp) {
936 revert = true;
937 break;
938 }
939 }
940
941 if (revert) {
942 xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
943 ep_ring->num_trbs_free = num_trbs_free_temp;
944 }
945}
946
947/*
948 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
949 * we need to clear the set deq pending flag in the endpoint ring state, so that
950 * the TD queueing code can ring the doorbell again. We also need to ring the
951 * endpoint doorbell to restart the ring, but only if there aren't more
952 * cancellations pending.
953 */
954static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
955 union xhci_trb *trb, u32 cmd_comp_code)
956{
957 unsigned int ep_index;
958 unsigned int stream_id;
959 struct xhci_ring *ep_ring;
960 struct xhci_virt_device *dev;
961 struct xhci_virt_ep *ep;
962 struct xhci_ep_ctx *ep_ctx;
963 struct xhci_slot_ctx *slot_ctx;
964
965 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
966 stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
967 dev = xhci->devs[slot_id];
968 ep = &dev->eps[ep_index];
969
970 ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
971 if (!ep_ring) {
972 xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
973 stream_id);
974 /* XXX: Harmless??? */
975 goto cleanup;
976 }
977
978 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
979 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
980
981 if (cmd_comp_code != COMP_SUCCESS) {
982 unsigned int ep_state;
983 unsigned int slot_state;
984
985 switch (cmd_comp_code) {
986 case COMP_TRB_ERR:
987 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
988 break;
989 case COMP_CTX_STATE:
990 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
991 ep_state = le32_to_cpu(ep_ctx->ep_info);
992 ep_state &= EP_STATE_MASK;
993 slot_state = le32_to_cpu(slot_ctx->dev_state);
994 slot_state = GET_SLOT_STATE(slot_state);
995 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
996 "Slot state = %u, EP state = %u",
997 slot_state, ep_state);
998 break;
999 case COMP_EBADSLT:
1000 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1001 slot_id);
1002 break;
1003 default:
1004 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1005 cmd_comp_code);
1006 break;
1007 }
1008 /* OK what do we do now? The endpoint state is hosed, and we
1009 * should never get to this point if the synchronization between
1010 * queueing and endpoint state is correct. This might happen
1011 * if the device gets disconnected after we've finished
1012 * cancelling URBs, which might not be an error...
1013 */
1014 } else {
1015 u64 deq;
1016 /* 4.6.10 deq ptr is written to the stream ctx for streams */
1017 if (ep->ep_state & EP_HAS_STREAMS) {
1018 struct xhci_stream_ctx *ctx =
1019 &ep->stream_info->stream_ctx_array[stream_id];
1020 deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1021 } else {
1022 deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1023 }
1024 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1025 "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
1026 if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1027 ep->queued_deq_ptr) == deq) {
1028 /* Update the ring's dequeue segment and dequeue pointer
1029 * to reflect the new position.
1030 */
1031 update_ring_for_set_deq_completion(xhci, dev,
1032 ep_ring, ep_index);
1033 } else {
1034 xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1035 xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1036 ep->queued_deq_seg, ep->queued_deq_ptr);
1037 }
1038 }
1039
1040cleanup:
1041 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1042 dev->eps[ep_index].queued_deq_seg = NULL;
1043 dev->eps[ep_index].queued_deq_ptr = NULL;
1044 /* Restart any rings with pending URBs */
1045 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1046}
1047
1048static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1049 union xhci_trb *trb, u32 cmd_comp_code)
1050{
1051 unsigned int ep_index;
1052
1053 ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1054 /* This command will only fail if the endpoint wasn't halted,
1055 * but we don't care.
1056 */
1057 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1058 "Ignoring reset ep completion code of %u", cmd_comp_code);
1059
1060 /* HW with the reset endpoint quirk needs to have a configure endpoint
1061 * command complete before the endpoint can be used. Queue that here
1062 * because the HW can't handle two commands being queued in a row.
1063 */
1064 if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1065 struct xhci_command *command;
1066 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
1067 if (!command) {
1068 xhci_warn(xhci, "WARN Cannot submit cfg ep: ENOMEM\n");
1069 return;
1070 }
1071 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1072 "Queueing configure endpoint command");
1073 xhci_queue_configure_endpoint(xhci, command,
1074 xhci->devs[slot_id]->in_ctx->dma, slot_id,
1075 false);
1076 xhci_ring_cmd_db(xhci);
1077 } else {
1078 /* Clear our internal halted state */
1079 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1080 }
1081}
1082
1083static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1084 u32 cmd_comp_code)
1085{
1086 if (cmd_comp_code == COMP_SUCCESS)
1087 xhci->slot_id = slot_id;
1088 else
1089 xhci->slot_id = 0;
1090}
1091
1092static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1093{
1094 struct xhci_virt_device *virt_dev;
1095
1096 virt_dev = xhci->devs[slot_id];
1097 if (!virt_dev)
1098 return;
1099 if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1100 /* Delete default control endpoint resources */
1101 xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1102 xhci_free_virt_device(xhci, slot_id);
1103}
1104
1105static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1106 struct xhci_event_cmd *event, u32 cmd_comp_code)
1107{
1108 struct xhci_virt_device *virt_dev;
1109 struct xhci_input_control_ctx *ctrl_ctx;
1110 unsigned int ep_index;
1111 unsigned int ep_state;
1112 u32 add_flags, drop_flags;
1113
1114 /*
1115 * Configure endpoint commands can come from the USB core
1116 * configuration or alt setting changes, or because the HW
1117 * needed an extra configure endpoint command after a reset
1118 * endpoint command or streams were being configured.
1119 * If the command was for a halted endpoint, the xHCI driver
1120 * is not waiting on the configure endpoint command.
1121 */
1122 virt_dev = xhci->devs[slot_id];
1123 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1124 if (!ctrl_ctx) {
1125 xhci_warn(xhci, "Could not get input context, bad type.\n");
1126 return;
1127 }
1128
1129 add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1130 drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1131 /* Input ctx add_flags are the endpoint index plus one */
1132 ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1133
1134 /* A usb_set_interface() call directly after clearing a halted
1135 * condition may race on this quirky hardware. Not worth
1136 * worrying about, since this is prototype hardware. Not sure
1137 * if this will work for streams, but streams support was
1138 * untested on this prototype.
1139 */
1140 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1141 ep_index != (unsigned int) -1 &&
1142 add_flags - SLOT_FLAG == drop_flags) {
1143 ep_state = virt_dev->eps[ep_index].ep_state;
1144 if (!(ep_state & EP_HALTED))
1145 return;
1146 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1147 "Completed config ep cmd - "
1148 "last ep index = %d, state = %d",
1149 ep_index, ep_state);
1150 /* Clear internal halted state and restart ring(s) */
1151 virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1152 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1153 return;
1154 }
1155 return;
1156}
1157
1158static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
1159 struct xhci_event_cmd *event)
1160{
1161 xhci_dbg(xhci, "Completed reset device command.\n");
1162 if (!xhci->devs[slot_id])
1163 xhci_warn(xhci, "Reset device command completion "
1164 "for disabled slot %u\n", slot_id);
1165}
1166
1167static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1168 struct xhci_event_cmd *event)
1169{
1170 if (!(xhci->quirks & XHCI_NEC_HOST)) {
1171 xhci->error_bitmask |= 1 << 6;
1172 return;
1173 }
1174 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1175 "NEC firmware version %2x.%02x",
1176 NEC_FW_MAJOR(le32_to_cpu(event->status)),
1177 NEC_FW_MINOR(le32_to_cpu(event->status)));
1178}
1179
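/* Remove the command from the command list, then either wake its submitter
 * or free it: a command with a completion is owned by the waiter, which
 * reads cmd->status after being woken; fire-and-forget commands without a
 * completion are freed here instead.
 */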
1180static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1181{
1182 list_del(&cmd->cmd_list);
1183
1184 if (cmd->completion) {
1185 cmd->status = status;
1186 complete(cmd->completion);
1187 } else {
1188 kfree(cmd);
1189 }
1190}
1191
1192void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1193{
1194 struct xhci_command *cur_cmd, *tmp_cmd;
1195 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1196 xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
1197}
1198
1199/*
1200 * Turn all commands on the command ring with status set to "aborted" into
1201 * no-op TRBs. If there are other commands waiting, restart the ring and kick
1202 * the timer. This must be called with the command ring stopped and xhci->lock held.
1203 */
1204static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1205 struct xhci_command *cur_cmd)
1206{
1207 struct xhci_command *i_cmd, *tmp_cmd;
1208 u32 cycle_state;
1209
1210 /* Turn all aborted commands in list to no-ops, then restart */
1211 list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
1212 cmd_list) {
1213
1214 if (i_cmd->status != COMP_CMD_ABORT)
1215 continue;
1216
1217 i_cmd->status = COMP_CMD_STOP;
1218
1219 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
1220 i_cmd->command_trb);
1221 /* get cycle state from the original cmd trb */
1222 cycle_state = le32_to_cpu(
1223 i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
1224 /* modify the command trb to no-op command */
1225 i_cmd->command_trb->generic.field[0] = 0;
1226 i_cmd->command_trb->generic.field[1] = 0;
1227 i_cmd->command_trb->generic.field[2] = 0;
1228 i_cmd->command_trb->generic.field[3] = cpu_to_le32(
1229 TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1230
1231 /*
1232 * The caller waiting for completion is woken when the command
1233 * completion event is received for these no-op commands.
1234 */
1235 }
1236
1237 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1238
1239 /* ring command ring doorbell to restart the command ring */
1240 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
1241 !(xhci->xhc_state & XHCI_STATE_DYING)) {
1242 xhci->current_cmd = cur_cmd;
1243 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1244 xhci_ring_cmd_db(xhci);
1245 }
1246 return;
1247}


void xhci_handle_command_timeout(unsigned long data)
{
	struct xhci_hcd *xhci;
	int ret;
	unsigned long flags;
	u64 hw_ring_state;
	struct xhci_command *cur_cmd = NULL;
	xhci = (struct xhci_hcd *) data;

	/* mark this command to be cancelled */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->current_cmd) {
		cur_cmd = xhci->current_cmd;
		cur_cmd->status = COMP_CMD_ABORT;
	}


	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {

		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Command timeout\n");
		ret = xhci_abort_cmd_ring(xhci);
		if (unlikely(ret == -ESHUTDOWN)) {
			xhci_err(xhci, "Abort command ring failed\n");
			xhci_cleanup_command_queue(xhci);
			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
			xhci_dbg(xhci, "xHCI host controller is dead.\n");
		}
		return;
	}
	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}

	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	del_timer(&xhci->cmd_timer);

	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_CMD_STOP) {
		xhci_handle_stopped_cmd_ring(xhci, cmd);
		return;
	}
	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_CMD_ABORT) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_CMD_ABORT)
			goto event_handled;
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, event,
						  cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_CMD_STOP)
			cmd_comp_code = COMP_CMD_STOP;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}

	/* restart timer if this wasn't the last command */
	if (cmd->cmd_list.next != &xhci->cmd_list) {
		xhci->current_cmd = list_entry(cmd->cmd_list.next,
					       struct xhci_command, cmd_list);
		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 * Add one to it in order to call xhci_find_slot_id_by_port.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes.  Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
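
/*
 * Worked example for the translation above (hypothetical port layout,
 * for illustration only): if port_array[] holds { 0x02, 0x03, 0x02,
 * 0x03 } and a Port Status Change Event arrives for hardware port_id 4,
 * then for the USB 3.0 roothub only port 2 (speed 0x03) among the first
 * three ports counts as a similar speed port, so this returns 1 -- the
 * event maps to usb3_ports[1], the second USB 3.0 port, zero-based.
 */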

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for "
				"unused slot %u\n", slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
			slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		inc_deq(xhci, xhci->event_ring);
		return;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];

	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
		hcd = xhci->shared_hcd;

	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports.  We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed >= HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = readl(port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = readl(&xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED_ANY(temp)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << faked_port_index;
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			/* Need to wait until the next link state change
			 * indicates the device is actually in U0.
			 */
			bogus_port_status = true;
			goto cleanup;
		} else if (!test_bit(faked_port_index,
				&bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(faked_port_index, &bus_state->resuming_ports);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
			DEV_SUPERSPEED_ANY(temp)) {
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		/* We've just brought the device into U0 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume.  If it's a device
		 * initiated remote wake, don't pass up the link state change,
		 * so the roothub behavior is consistent with external
		 * USB 3.0 hub behavior.
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci,
				faked_port_index + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
			bus_state->port_remote_wakeup &=
				~(1 << faked_port_index);
			xhci_test_and_clear_bit(xhci, port_array,
					faked_port_index, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
					faked_port_index + 1);
			bogus_port_status = true;
			goto cleanup;
		}
	}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state).  If so, let the driver know it's
	 * out of the RExit state.
	 */
	if (!DEV_SUPERSPEED_ANY(temp) &&
			test_and_clear_bit(faked_port_index,
				&bus_state->rexit_ports)) {
		complete(&bus_state->rexit_done[faked_port_index]);
		bogus_port_status = true;
		goto cleanup;
	}

	if (hcd->speed < HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event.  Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set.  When an event occurs, switch over to
	 * polling to avoid losing status changes.
	 */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns 0.
 */
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma,
		bool debug)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				(unsigned long long)suspect_dma,
				(unsigned long long)start_dma,
				(unsigned long long)end_trb_dma,
				(unsigned long long)cur_seg->dma,
				(unsigned long long)end_seg_dma);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
							suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
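
/*
 * Sketch of the wrapped case handled above (illustration only): with a
 * TD that wraps through the link TRB back to the top of one segment,
 *
 *	segment:  [ x x E . . S x x ]
 *	            ^trbs[0]      ^trbs[TRBS_PER_SEGMENT - 1]
 *
 * where S is start_trb and E is end_trb, start_dma > end_trb_dma, so a
 * suspect address matches if it falls either in [start_dma, end of
 * segment] or in [cur_seg->dma, end_trb_dma].
 */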

static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	struct xhci_command *command;
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;

	ep->ep_state |= EP_HALTED;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, ep_index, td);

	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
				cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}

/*
 * Finish TD processing: remove the TD from the endpoint's TD list.
 * Returns 1 if the URB can be given back, 0 otherwise.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP ||
			trb_comp_code == COMP_STOP_SHORT) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		return 0;
	}
	if (trb_comp_code == COMP_STALL ||
		xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
						trb_comp_code)) {
		/* Issue a reset endpoint command to clear the host side
		 * halt, followed by a set dequeue command to move the
		 * dequeue pointer past the TD.
		 * The class driver clears the device side halt later.
		 */
		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
					ep_ring->stream_id, td, event_trb);
	} else {
		/* Update ring dequeue pointer */
		while (ep_ring->dequeue != td->last_trb)
			inc_deq(xhci, ep_ring);
		inc_deq(xhci, ep_ring);
	}

td_cleanup:
	/* Clean up the endpoint's TD list */
	urb = td->urb;
	urb_priv = urb->hcpriv;

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
			urb->transfer_buffer_length,
			urb->actual_length);
		urb->actual_length = 0;
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
	}
	list_del_init(&td->td_list);
	/* Was this TD slated to be cancelled but completed anyway? */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	urb_priv->td_cnt++;
	/* Giveback the urb when all the tds are completed */
	if (urb_priv->td_cnt == urb_priv->length) {
		ret = 1;
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
	}

	return ret;
}

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_SHORT:
		if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
		else
			td->urb->actual_length =
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	case COMP_STOP:
		/* Did we stop at data stage? */
		if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		/* fall through */
	case COMP_STOP_INVAL:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		else if (!td->urb_length_set)
			td->urb->actual_length = 0;

		return finish_td(xhci, td, event_trb, event, ep, status, false);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened?  I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb_length_set) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
			/*
			 * Maybe the event was for the data stage? If so, update
			 * already the actual_length of the URB and flag it as
			 * set, so that it is not overwritten in the event for
			 * the last TRB.
			 */
			td->urb_length_set = true;
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
			frame->status = 0;
			break;
		}
		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
			trb_comp_code = COMP_SHORT_TX;
		/* fallthrough */
	case COMP_STOP_SHORT:
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_TX_ERR:
		frame->status = -EPROTO;
		if (event_trb != td->last_trb)
			return 0;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else if (trb_comp_code == COMP_STOP_SHORT) {
		frame->actual_length =
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
		td->urb->actual_length += frame->actual_length;
	} else {
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* Calculate the actual length: a skipped TD transferred nothing. */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring);
	inc_deq(xhci, ep_ring);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb ||
		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
			if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
				trb_comp_code = COMP_SHORT_TX;
		} else {
			*status = 0;
		}
		break;
	case COMP_STOP_SHORT:
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Stopped - short packet completion */
	if (trb_comp_code == COMP_STOP_SHORT) {
		td->urb->actual_length =
			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));

		if (td->urb->transfer_buffer_length <
				td->urb->actual_length) {
			xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
			td->urb->actual_length = 0;
			/* status will be set by usb core for canceled urbs */
		}
	/* Fast path - was this the last TRB in the TD for this URB? */
	} else if (event_trb == td->last_trb) {
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
	__releases(&xhci->lock)
	__acquires(&xhci->lock)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;
	bool handling_skipped_tds = false;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
			 (unsigned long long) xhci_trb_virt_to_dma(
				 xhci->event_ring->deq_seg,
				 xhci->event_ring->dequeue),
			 lower_32_bits(le64_to_cpu(event->buffer)),
			 upper_32_bits(le64_to_cpu(event->buffer)),
			 le32_to_cpu(event->transfer_len),
			 le32_to_cpu(event->flags));
		xhci_dbg(xhci, "Event ring:\n");
		xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
			break;
		if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
			trb_comp_code = COMP_SHORT_TX;
		else
			xhci_warn_ratelimited(xhci,
				"WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
		/* Fall through to the short transfer handling below. */
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STOP_SHORT:
		xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
		break;
	case COMP_STALL:
		xhci_dbg(xhci, "Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_dbg(xhci, "Transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_dbg(xhci, "Babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detected an incompatible device\n");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When a Missed Service Error is encountered, the xHC may
		 * have skipped one or more isoc TDs.  Set the skip flag on
		 * the endpoint; the missed TDs will be completed as short
		 * transfers the next time the ring is processed.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	case COMP_PING_ERR:
		ep->skip = true;
		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition %u, HC probably busted\n",
			  trb_comp_code);
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			/*
			 * A stopped endpoint may generate an extra completion
			 * event if the device was suspended.  Don't print
			 * warnings.
			 */
			if (!(trb_comp_code == COMP_STOP ||
						trb_comp_code == COMP_STOP_INVAL)) {
				xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
						TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
						ep_index);
				xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
						(le32_to_cpu(event->flags) &
						 TRB_TYPE_BITMASK)>>10);
				xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			}
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
					"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma, false);

		/*
		 * Skip the Force Stopped Event.  The event_trb (event_dma) of
		 * an FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the TRB before the current TD.  That TRB may be a
		 * Link TRB or the last TRB of the previous TD.  The Stop
		 * Endpoint command completion handler will take care of the
		 * rest.
		 */
		if (!event_seg && (trb_comp_code == COMP_STOP ||
				   trb_comp_code == COMP_STOP_INVAL)) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD ep_index %d "
					"comp_code %u\n", ep_index,
					trb_comp_code);
				trb_in_td(xhci, ep_ring->deq_seg,
					  ep_ring->dequeue, td->last_trb,
					  event_dma, true);
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRB should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:


		handling_skipped_tds = ep->skip &&
			trb_comp_code != COMP_MISSED_INT &&
			trb_comp_code != COMP_PING_ERR;

		/*
		 * Do not update event ring dequeue pointer if we're in a loop
		 * processing missed tds.
		 */
		if (!handling_skipped_tds)
			inc_deq(xhci, xhci->event_ring);

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;

			xhci_urb_free_priv(urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %d, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, there are missed TDs on the endpoint ring
	 * that need to be taken care of.  Process them as short transfers
	 * until we reach the TD pointed to by the event.
	 */
	} while (handling_skipped_tds);

	return 0;
}

/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done.  In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_DEV_NOTE):
		handle_device_notification(xhci, event);
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring);

	/* Are there more items on the event ring?  Caller will call us again to
	 * check.
	 */
	return 1;
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = readl(&xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	writel(status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	if (hcd->irq) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = readl(&xhci->ir_set->irq_pending);
		irq_pending |= IMAN_IP;
		writel(irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
			      &xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

irqreturn_t xhci_msi_irq(int irq, void *hcd)
{
	return xhci_irq(hcd);
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, more_trbs_coming);
}
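
/*
 * Sketch (illustration only, not called anywhere): with room on the
 * ring already verified, queueing a transfer no-op TRB could look like
 *
 *	queue_trb(xhci, ring, false, 0, 0, 0,
 *			TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
 *
 * field4 carries the TRB type and cycle bit; inc_enq() then advances
 * the enqueue pointer, following link TRBs as needed.
 */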

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	unsigned int num_trbs_needed;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}

	while (1) {
		if (room_on_ring(xhci, ep_ring, num_trbs))
			break;

		if (ep_ring == xhci->cmd_ring) {
			xhci_err(xhci, "Command ring expansion is not supported\n");
			return -ENOMEM;
		}

		xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
				"ERROR no room on ep ring, try ring expansion");
		num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
		if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
					mem_flags)) {
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) &&
					!(ring->type == TYPE_ISOC &&
					 (xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state ^= 1;
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	temp = urb->transfer_buffer_length;

	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	return num_trbs;
}
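
/*
 * Worked example for the boundary math above (hypothetical values): an
 * sg entry of 70 KB whose DMA address has offset 0xF000 within its
 * 64 KB region leaves 4 KB before the first boundary, so it needs one
 * TRB for that 4 KB head, one full 64 KB TRB, and one TRB for the
 * trailing 2 KB -- three TRBs, since a single TRB's buffer may not
 * cross a 64 KB boundary.
 */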

static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
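
/*
 * Illustration: if the ring's cycle state was 1 when queueing began,
 * start_cycle is 1 and the write above finally sets TRB_CYCLE on the
 * first TRB, handing the whole TD to the hardware in one step.  Every
 * later TRB was already written with the correct cycle bit, so the
 * xHC can never race ahead into a half-built TD.
 */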

/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		dev_dbg_ratelimited(&urb->dev->dev,
				"Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
				ep_interval, ep_interval == 1 ? "" : "s",
				xhci_interval, xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
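
/*
 * Worked example (illustration only): a full-speed interrupt endpoint
 * submitted with urb->interval = 4 frames is compared as 4 * 8 = 32
 * microframes.  If the endpoint context instead encodes 16 microframes,
 * the URB's interval is overwritten with 16 and converted back to
 * 16 / 8 = 2 frames before the transfer is queued.
 */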

/*
 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
 * packets remaining in the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
3048 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3049 *
3050 * Packets transferred up to and including this TRB = packets_transferred =
3051 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3052 *
3053 * TD size = total_packet_count - packets_transferred
3054 *
3055 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3056 * including this TRB, right shifted by 10
3057 *
3058 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3059 * This is taken care of in the TRB_TD_SIZE() macro
3060 *
3061 * The last TRB in a TD must have the TD size set to zero.
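 *
 * Worked example with illustrative values: a 3000 byte TD on an endpoint
 * with wMaxPacketSize = 512 gives total_packet_count =
 * DIV_ROUND_UP(3000, 512) = 6. After a first TRB carrying 1024 bytes,
 * packets_transferred = 1024 / 512 = 2, so the TD size written into the
 * second TRB is 6 - 2 = 4.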
3062 */
3063static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3064 int trb_buff_len, unsigned int td_total_len,
3065 struct urb *urb, unsigned int num_trbs_left)
3066{
3067 u32 maxp, total_packet_count;
3068
3069 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */
3070 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3071 return ((td_total_len - transferred) >> 10);
3072
3073 /* Last TRB in a TD, a zero-length TD, or a TD that fits in one TRB */
3074 if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
3075 trb_buff_len == td_total_len)
3076 return 0;
3077
3078 /* for MTK xHCI, TD size doesn't include this TRB */
3079 if (xhci->quirks & XHCI_MTK_HOST)
3080 trb_buff_len = 0;
3081
3082 maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3083 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3084
3085 /* Queueing functions don't count the current TRB into transferred */
3086 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3087}
3088
3089
3090static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3091 struct urb *urb, int slot_id, unsigned int ep_index)
3092{
3093 struct xhci_ring *ep_ring;
3094 unsigned int num_trbs;
3095 struct urb_priv *urb_priv;
3096 struct xhci_td *td;
3097 struct scatterlist *sg;
3098 int num_sgs;
3099 int trb_buff_len, this_sg_len, running_total, ret;
3100 unsigned int total_packet_count;
3101 bool zero_length_needed;
3102 bool first_trb;
3103 int last_trb_num;
3104 u64 addr;
3105 bool more_trbs_coming;
3106
3107 struct xhci_generic_trb *start_trb;
3108 int start_cycle;
3109
3110 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3111 if (!ep_ring)
3112 return -EINVAL;
3113
3114 num_trbs = count_sg_trbs_needed(xhci, urb);
3115 num_sgs = urb->num_mapped_sgs;
3116 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3117 usb_endpoint_maxp(&urb->ep->desc));
3118
3119 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3120 ep_index, urb->stream_id,
3121 num_trbs, urb, 0, mem_flags);
3122 if (ret < 0)
3123 return ret;
3124
3125 urb_priv = urb->hcpriv;
3126
3127 /* Deal with URB_ZERO_PACKET - need one more td/trb */
3128 zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
3129 urb_priv->length == 2;
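/* urb_priv->length == 2 means a second TD was allocated for the ZLP */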
3130 if (zero_length_needed) {
3131 num_trbs++;
3132 xhci_dbg(xhci, "Creating zero length td.\n");
3133 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3134 ep_index, urb->stream_id,
3135 1, urb, 1, mem_flags);
3136 if (ret < 0)
3137 return ret;
3138 }
3139
3140 td = urb_priv->td[0];
3141
3142 /*
3143 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3144 * until we've finished creating all the other TRBs. The ring's cycle
3145 * state may change as we enqueue the other TRBs, so save it too.
3146 */
3147 start_trb = &ep_ring->enqueue->generic;
3148 start_cycle = ep_ring->cycle_state;
3149
3150 running_total = 0;
3151 /*
3152 * How much data is in the first TRB?
3153 *
3154 * There are three forces at work for TRB buffer pointers and lengths:
3155 * 1. We don't want to walk off the end of this sg-list entry buffer.
3156 * 2. The transfer length that the driver requested may be smaller than
3157 * the amount of memory allocated for this scatter-gather list.
3158 * 3. TRBs buffers can't cross 64KB boundaries.
3159 */
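/*
 * Illustrative clamp: an 8KB sg entry starting 2KB below a 64KB boundary
 * in a 4KB URB yields min(2KB, 8KB, 4KB) = 2KB for the first TRB.
 */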
3160 sg = urb->sg;
3161 addr = (u64) sg_dma_address(sg);
3162 this_sg_len = sg_dma_len(sg);
3163 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3164 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3165 if (trb_buff_len > urb->transfer_buffer_length)
3166 trb_buff_len = urb->transfer_buffer_length;
3167
3168 first_trb = true;
3169 last_trb_num = zero_length_needed ? 2 : 1;
3170 /* Queue the first TRB, even if it's zero-length */
3171 do {
3172 u32 field = 0;
3173 u32 length_field = 0;
3174 u32 remainder = 0;
3175
3176 /* Don't change the cycle bit of the first TRB until later */
3177 if (first_trb) {
3178 first_trb = false;
3179 if (start_cycle == 0)
3180 field |= 0x1;
3181 } else
3182 field |= ep_ring->cycle_state;
3183
3184 /* Chain all the TRBs together; clear the chain bit in the last
3185 * TRB to indicate it's the last TRB in the chain.
3186 */
3187 if (num_trbs > last_trb_num) {
3188 field |= TRB_CHAIN;
3189 } else if (num_trbs == last_trb_num) {
3190 td->last_trb = ep_ring->enqueue;
3191 field |= TRB_IOC;
3192 } else if (zero_length_needed && num_trbs == 1) {
3193 trb_buff_len = 0;
3194 urb_priv->td[1]->last_trb = ep_ring->enqueue;
3195 field |= TRB_IOC;
3196 }
3197
3198 /* Only set interrupt on short packet for IN endpoints */
3199 if (usb_urb_dir_in(urb))
3200 field |= TRB_ISP;
3201
3202 if (TRB_MAX_BUFF_SIZE -
3203 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
3204 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
3205 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
3206 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3207 (unsigned int) addr + trb_buff_len);
3208 }
3209
3210 /* Set the TRB length, TD size, and interrupter fields. */
3211 remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
3212 urb->transfer_buffer_length,
3213 urb, num_trbs - 1);
3214
3215 length_field = TRB_LEN(trb_buff_len) |
3216 TRB_TD_SIZE(remainder) |
3217 TRB_INTR_TARGET(0);
3218
3219 if (num_trbs > 1)
3220 more_trbs_coming = true;
3221 else
3222 more_trbs_coming = false;
3223 queue_trb(xhci, ep_ring, more_trbs_coming,
3224 lower_32_bits(addr),
3225 upper_32_bits(addr),
3226 length_field,
3227 field | TRB_TYPE(TRB_NORMAL));
3228 --num_trbs;
3229 running_total += trb_buff_len;
3230
3231 /* Calculate length for next transfer --
3232 * Are we done queueing all the TRBs for this sg entry?
3233 */
3234 this_sg_len -= trb_buff_len;
3235 if (this_sg_len == 0) {
3236 --num_sgs;
3237 if (num_sgs == 0)
3238 break;
3239 sg = sg_next(sg);
3240 addr = (u64) sg_dma_address(sg);
3241 this_sg_len = sg_dma_len(sg);
3242 } else {
3243 addr += trb_buff_len;
3244 }
3245
3246 trb_buff_len = TRB_MAX_BUFF_SIZE -
3247 (addr & (TRB_MAX_BUFF_SIZE - 1));
3248 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3249 if (running_total + trb_buff_len > urb->transfer_buffer_length)
3250 trb_buff_len =
3251 urb->transfer_buffer_length - running_total;
3252 } while (num_trbs > 0);
3253
3254 check_trb_math(urb, num_trbs, running_total);
3255 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3256 start_cycle, start_trb);
3257 return 0;
3258}
3259
3260/* This is very similar to what ehci-q.c qtd_fill() does */
3261int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3262 struct urb *urb, int slot_id, unsigned int ep_index)
3263{
3264 struct xhci_ring *ep_ring;
3265 struct urb_priv *urb_priv;
3266 struct xhci_td *td;
3267 int num_trbs;
3268 struct xhci_generic_trb *start_trb;
3269 bool first_trb;
3270 int last_trb_num;
3271 bool more_trbs_coming;
3272 bool zero_length_needed;
3273 int start_cycle;
3274 u32 field, length_field;
3275
3276 int running_total, trb_buff_len, ret;
3277 unsigned int total_packet_count;
3278 u64 addr;
3279
3280 if (urb->num_sgs)
3281 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
3282
3283 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3284 if (!ep_ring)
3285 return -EINVAL;
3286
3287 num_trbs = 0;
3288 /* How much data is (potentially) left before the 64KB boundary? */
3289 running_total = TRB_MAX_BUFF_SIZE -
3290 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3291 running_total &= TRB_MAX_BUFF_SIZE - 1;
3292
3293 /* If there's some data on this 64KB chunk, or we have to send a
3294 * zero-length transfer, we need at least one TRB
3295 */
3296 if (running_total != 0 || urb->transfer_buffer_length == 0)
3297 num_trbs++;
3298 /* How many more 64KB chunks to transfer, how many more TRBs? */
3299 while (running_total < urb->transfer_buffer_length) {
3300 num_trbs++;
3301 running_total += TRB_MAX_BUFF_SIZE;
3302 }
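/*
 * Illustrative counts: a 64KB-aligned 128KB buffer needs exactly two TRBs;
 * the same buffer starting 1KB past a boundary needs a third TRB for the
 * short leading chunk.
 */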
3303
3304 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3305 ep_index, urb->stream_id,
3306 num_trbs, urb, 0, mem_flags);
3307 if (ret < 0)
3308 return ret;
3309
3310 urb_priv = urb->hcpriv;
3311
3312 /* Deal with URB_ZERO_PACKET - need one more td/trb */
3313 zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
3314 urb_priv->length == 2;
3315 if (zero_length_needed) {
3316 num_trbs++;
3317 xhci_dbg(xhci, "Creating zero length td.\n");
3318 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3319 ep_index, urb->stream_id,
3320 1, urb, 1, mem_flags);
3321 if (ret < 0)
3322 return ret;
3323 }
3324
3325 td = urb_priv->td[0];
3326
3327 /*
3328 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3329 * until we've finished creating all the other TRBs. The ring's cycle
3330 * state may change as we enqueue the other TRBs, so save it too.
3331 */
3332 start_trb = &ep_ring->enqueue->generic;
3333 start_cycle = ep_ring->cycle_state;
3334
3335 running_total = 0;
3336 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3337 usb_endpoint_maxp(&urb->ep->desc));
3338 /* How much data is in the first TRB? */
3339 addr = (u64) urb->transfer_dma;
3340 trb_buff_len = TRB_MAX_BUFF_SIZE -
3341 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3342 if (trb_buff_len > urb->transfer_buffer_length)
3343 trb_buff_len = urb->transfer_buffer_length;
3344
3345 first_trb = true;
3346 last_trb_num = zero_length_needed ? 2 : 1;
3347 /* Queue the first TRB, even if it's zero-length */
3348 do {
3349 u32 remainder = 0;
3350 field = 0;
3351
3352 /* Don't change the cycle bit of the first TRB until later */
3353 if (first_trb) {
3354 first_trb = false;
3355 if (start_cycle == 0)
3356 field |= 0x1;
3357 } else
3358 field |= ep_ring->cycle_state;
3359
3360 /* Chain all the TRBs together; clear the chain bit in the last
3361 * TRB to indicate it's the last TRB in the chain.
3362 */
3363 if (num_trbs > last_trb_num) {
3364 field |= TRB_CHAIN;
3365 } else if (num_trbs == last_trb_num) {
3366 td->last_trb = ep_ring->enqueue;
3367 field |= TRB_IOC;
3368 } else if (zero_length_needed && num_trbs == 1) {
3369 trb_buff_len = 0;
3370 urb_priv->td[1]->last_trb = ep_ring->enqueue;
3371 field |= TRB_IOC;
3372 }
3373
3374 /* Only set interrupt on short packet for IN endpoints */
3375 if (usb_urb_dir_in(urb))
3376 field |= TRB_ISP;
3377
3378 /* Set the TRB length, TD size, and interrupter fields. */
3379 remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
3380 urb->transfer_buffer_length,
3381 urb, num_trbs - 1);
3382
3383 length_field = TRB_LEN(trb_buff_len) |
3384 TRB_TD_SIZE(remainder) |
3385 TRB_INTR_TARGET(0);
3386
3387 if (num_trbs > 1)
3388 more_trbs_coming = true;
3389 else
3390 more_trbs_coming = false;
3391 queue_trb(xhci, ep_ring, more_trbs_coming,
3392 lower_32_bits(addr),
3393 upper_32_bits(addr),
3394 length_field,
3395 field | TRB_TYPE(TRB_NORMAL));
3396 --num_trbs;
3397 running_total += trb_buff_len;
3398
3399 /* Calculate length for next transfer */
3400 addr += trb_buff_len;
3401 trb_buff_len = urb->transfer_buffer_length - running_total;
3402 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3403 trb_buff_len = TRB_MAX_BUFF_SIZE;
3404 } while (num_trbs > 0);
3405
3406 check_trb_math(urb, num_trbs, running_total);
3407 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3408 start_cycle, start_trb);
3409 return 0;
3410}
3411
3412/* Caller must have locked xhci->lock */
3413int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3414 struct urb *urb, int slot_id, unsigned int ep_index)
3415{
3416 struct xhci_ring *ep_ring;
3417 int num_trbs;
3418 int ret;
3419 struct usb_ctrlrequest *setup;
3420 struct xhci_generic_trb *start_trb;
3421 int start_cycle;
3422 u32 field, length_field, remainder;
3423 struct urb_priv *urb_priv;
3424 struct xhci_td *td;
3425
3426 ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3427 if (!ep_ring)
3428 return -EINVAL;
3429
3430 /*
3431 * Need to copy setup packet into setup TRB, so we can't use the setup
3432 * DMA address.
3433 */
3434 if (!urb->setup_packet)
3435 return -EINVAL;
3436
3437 /* 1 TRB for setup, 1 for status */
3438 num_trbs = 2;
3439 /*
3440 * Don't need to check if we need additional event data and normal TRBs,
3441 * since data in control transfers will never get bigger than 16MB.
3442 * XXX: can we get a buffer that crosses 64KB boundaries?
3443 */
3444 if (urb->transfer_buffer_length > 0)
3445 num_trbs++;
3446 ret = prepare_transfer(xhci, xhci->devs[slot_id],
3447 ep_index, urb->stream_id,
3448 num_trbs, urb, 0, mem_flags);
3449 if (ret < 0)
3450 return ret;
3451
3452 urb_priv = urb->hcpriv;
3453 td = urb_priv->td[0];
3454
3455 /*
3456 * Don't give the first TRB to the hardware (by toggling the cycle bit)
3457 * until we've finished creating all the other TRBs. The ring's cycle
3458 * state may change as we enqueue the other TRBs, so save it too.
3459 */
3460 start_trb = &ep_ring->enqueue->generic;
3461 start_cycle = ep_ring->cycle_state;
3462
3463 /* Queue setup TRB - see section 6.4.1.2.1 */
3464 /* FIXME better way to translate setup_packet into two u32 fields? */
3465 setup = (struct usb_ctrlrequest *) urb->setup_packet;
3466 field = 0;
3467 field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
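/* TRB_IDT: the 8 setup bytes are carried in the TRB itself, not via DMA */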
3468 if (start_cycle == 0)
3469 field |= 0x1;
3470
3471 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
3472 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3473 if (urb->transfer_buffer_length > 0) {
3474 if (setup->bRequestType & USB_DIR_IN)
3475 field |= TRB_TX_TYPE(TRB_DATA_IN);
3476 else
3477 field |= TRB_TX_TYPE(TRB_DATA_OUT);
3478 }
3479 }
3480
3481 queue_trb(xhci, ep_ring, true,
3482 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3483 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3484 TRB_LEN(8) | TRB_INTR_TARGET(0),
3485 /* Immediate data in pointer */
3486 field);
3487
3488 /* If there's data, queue data TRBs */
3489 /* Only set interrupt on short packet for IN endpoints */
3490 if (usb_urb_dir_in(urb))
3491 field = TRB_ISP | TRB_TYPE(TRB_DATA);
3492 else
3493 field = TRB_TYPE(TRB_DATA);
3494
3495 remainder = xhci_td_remainder(xhci, 0,
3496 urb->transfer_buffer_length,
3497 urb->transfer_buffer_length,
3498 urb, 1);
3499
3500 length_field = TRB_LEN(urb->transfer_buffer_length) |
3501 TRB_TD_SIZE(remainder) |
3502 TRB_INTR_TARGET(0);
3503
3504 if (urb->transfer_buffer_length > 0) {
3505 if (setup->bRequestType & USB_DIR_IN)
3506 field |= TRB_DIR_IN;
3507 queue_trb(xhci, ep_ring, true,
3508 lower_32_bits(urb->transfer_dma),
3509 upper_32_bits(urb->transfer_dma),
3510 length_field,
3511 field | ep_ring->cycle_state);
3512 }
3513
3514 /* Save the DMA address of the last TRB in the TD */
3515 td->last_trb = ep_ring->enqueue;
3516
3517 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3518 /* If the device sent data, the status stage is an OUT transfer */
3519 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3520 field = 0;
3521 else
3522 field = TRB_DIR_IN;
3523 queue_trb(xhci, ep_ring, false,
3524 0,
3525 0,
3526 TRB_INTR_TARGET(0),
3527 /* Event on completion */
3528 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3529
3530 giveback_first_trb(xhci, slot_id, ep_index, 0,
3531 start_cycle, start_trb);
3532 return 0;
3533}
3534
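/*
 * One TRB is needed for each 64KB chunk touched by this isoc frame's
 * buffer. Illustrative check: a 4096 byte frame starting 1KB below a
 * 64KB boundary needs DIV_ROUND_UP(4096 + 64512, 65536) = 2 TRBs.
 */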
3535static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3536 struct urb *urb, int i)
3537{
3538 int num_trbs = 0;
3539 u64 addr, td_len;
3540
3541 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3542 td_len = urb->iso_frame_desc[i].length;
3543
3544 num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3545 TRB_MAX_BUFF_SIZE);
3546 if (num_trbs == 0)
3547 num_trbs++;
3548
3549 return num_trbs;
3550}
3551
3552/*
3553 * The transfer burst count field of the isochronous TRB defines the number of
3554 * bursts that are required to move all packets in this TD. Only SuperSpeed
3555 * devices can burst up to (bMaxBurst + 1) packets per service interval.
3556 * This field is zero based, meaning a value of zero in the field means one
3557 * burst. Basically, for everything but SuperSpeed devices, this field will be
3558 * zero. Only xHCI 1.0 host controllers support this field.
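 *
 * Illustrative example: 10 packets with bMaxBurst = 3 (up to four packets
 * per burst) give DIV_ROUND_UP(10, 4) - 1 = 2, i.e. three bursts,
 * zero-based.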
3559 */
3560static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3561 struct urb *urb, unsigned int total_packet_count)
3562{
3563 unsigned int max_burst;
3564
3565 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3566 return 0;
3567
3568 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3569 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3570}
3571
3572/*
3573 * Returns the number of packets in the last "burst" of packets. This field is
3574 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3575 * the last burst packet count is equal to the total number of packets in the
3576 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3577 * must contain (bMaxBurst + 1) number of packets, but the last burst can
3578 * contain 1 to (bMaxBurst + 1) packets.
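 *
 * Illustrative example: 10 packets with bMaxBurst = 3 leave a residue of
 * 10 % 4 = 2, so the function returns 2 - 1 = 1 and the last burst
 * carries two packets.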
3579 */
3580static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3581 struct urb *urb, unsigned int total_packet_count)
3582{
3583 unsigned int max_burst;
3584 unsigned int residue;
3585
3586 if (xhci->hci_version < 0x100)
3587 return 0;
3588
3589 if (urb->dev->speed >= USB_SPEED_SUPER) {
3590 /* bMaxBurst is zero based: 0 means 1 packet per burst */
3591 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3592 residue = total_packet_count % (max_burst + 1);
3593 /* If residue is zero, the last burst contains (max_burst + 1)
3594 * number of packets, but the TLBPC field is zero-based.
3595 */
3596 if (residue == 0)
3597 return max_burst;
3598 return residue - 1;
3599 }
3600 if (total_packet_count == 0)
3601 return 0;
3602 return total_packet_count - 1;
3603}
3604
3605/*
3606 * Calculates the Frame ID field of the isochronous TRB, which identifies
3607 * the target frame that the Interval associated with this Isochronous
3608 * Transfer Descriptor will start on. Refer to section 4.11.2.5 in the 1.1 spec.
3609 *
3610 * Returns actual frame id on success, negative value on error.
3611 */
3612static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3613 struct urb *urb, int index)
3614{
3615 int start_frame, ist, ret = 0;
3616 int start_frame_id, end_frame_id, current_frame_id;
3617
3618 if (urb->dev->speed == USB_SPEED_LOW ||
3619 urb->dev->speed == USB_SPEED_FULL)
3620 start_frame = urb->start_frame + index * urb->interval;
3621 else
3622 start_frame = (urb->start_frame + index * urb->interval) >> 3;
3623
3624 /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3625 *
3626 * If bit [3] of IST is cleared to '0', software can add a TRB no
3627 * later than IST[2:0] Microframes before that TRB is scheduled to
3628 * be executed.
3629 * If bit [3] of IST is set to '1', software can add a TRB no later
3630 * than IST[2:0] Frames before that TRB is scheduled to be executed.
3631 */
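/*
 * Illustrative decode: HCSPARAMS2 IST = 0b1010 has bit [3] set and
 * IST[2:0] = 2, so ist below becomes 2 << 3 = 16 microframes (two frames).
 */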
3632 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3633 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3634 ist <<= 3;
3635
3636 /* Software shall not schedule an Isoch TD with a Frame ID value that
3637 * is less than the Start Frame ID or greater than the End Frame ID,
3638 * where:
3639 *
3640 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3641 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3642 *
3643 * Both the End Frame ID and Start Frame ID values are calculated
3644 * in microframes. When software determines the valid Frame ID value;
3645 * The End Frame ID value should be rounded down to the nearest Frame
3646 * boundary, and the Start Frame ID value should be rounded up to the
3647 * nearest Frame boundary.
3648 */
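/*
 * Illustrative window: with MFINDEX = 1000 and ist = 16, start_frame_id
 * is roundup(1000 + 16 + 1, 8) = 1024 and end_frame_id is
 * rounddown(1000 + 895 * 8, 8) = 8160 microframes, before both are
 * shifted down to frames below.
 */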
3649 current_frame_id = readl(&xhci->run_regs->microframe_index);
3650 start_frame_id = roundup(current_frame_id + ist + 1, 8);
3651 end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3652
3653 start_frame &= 0x7ff;
3654 start_frame_id = (start_frame_id >> 3) & 0x7ff;
3655 end_frame_id = (end_frame_id >> 3) & 0x7ff;
3656
3657 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3658 __func__, index, readl(&xhci->run_regs->microframe_index),
3659 start_frame_id, end_frame_id, start_frame);
3660
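/* the valid window can wrap around the 2048-frame index, hence two cases */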
3661 if (start_frame_id < end_frame_id) {
3662 if (start_frame > end_frame_id ||
3663 start_frame < start_frame_id)
3664 ret = -EINVAL;
3665 } else if (start_frame_id > end_frame_id) {
3666 if (start_frame > end_frame_id &&
3667 start_frame < start_frame_id)
3668 ret = -EINVAL;
3669 } else {
3670 ret = -EINVAL;
3671 }
3672
3673 if (index == 0) {
3674 if (ret == -EINVAL || start_frame == start_frame_id) {
3675 start_frame = start_frame_id + 1;
3676 if (urb->dev->speed == USB_SPEED_LOW ||
3677 urb->dev->speed == USB_SPEED_FULL)
3678 urb->start_frame = start_frame;
3679 else
3680 urb->start_frame = start_frame << 3;
3681 ret = 0;
3682 }
3683 }
3684
3685 if (ret) {
3686 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3687 start_frame, current_frame_id, index,
3688 start_frame_id, end_frame_id);
3689 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3690 return ret;
3691 }
3692
3693 return start_frame;
3694}
3695
3696/* Queue the TRBs for an isochronous transfer */
3697static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3698 struct urb *urb, int slot_id, unsigned int ep_index)
3699{
3700 struct xhci_ring *ep_ring;
3701 struct urb_priv *urb_priv;
3702 struct xhci_td *td;
3703 int num_tds, trbs_per_td;
3704 struct xhci_generic_trb *start_trb;
3705 bool first_trb;
3706 int start_cycle;
3707 u32 field, length_field;
3708 int running_total, trb_buff_len, td_len, td_remain_len, ret;
3709 u64 start_addr, addr;
3710 int i, j;
3711 bool more_trbs_coming;
3712 struct xhci_virt_ep *xep;
3713 int frame_id;
3714
3715 xep = &xhci->devs[slot_id]->eps[ep_index];
3716 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3717
3718 num_tds = urb->number_of_packets;
3719 if (num_tds < 1) {
3720 xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3721 return -EINVAL;
3722 }
3723 start_addr = (u64) urb->transfer_dma;
3724 start_trb = &ep_ring->enqueue->generic;
3725 start_cycle = ep_ring->cycle_state;
3726
3727 urb_priv = urb->hcpriv;
3728 /* Queue the TRBs for each TD, even if they are zero-length */
3729 for (i = 0; i < num_tds; i++) {
3730 unsigned int total_pkt_count, max_pkt;
3731 unsigned int burst_count, last_burst_pkt_count;
3732 u32 sia_frame_id;
3733
3734 first_trb = true;
3735 running_total = 0;
3736 addr = start_addr + urb->iso_frame_desc[i].offset;
3737 td_len = urb->iso_frame_desc[i].length;
3738 td_remain_len = td_len;
3739 max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
3740 total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
3741
3742 /* A zero-length transfer still involves at least one packet. */
3743 if (total_pkt_count == 0)
3744 total_pkt_count++;
3745 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
3746 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
3747 urb, total_pkt_count);
3748
3749 trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3750
3751 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3752 urb->stream_id, trbs_per_td, urb, i, mem_flags);
3753 if (ret < 0) {
3754 if (i == 0)
3755 return ret;
3756 goto cleanup;
3757 }
3758 td = urb_priv->td[i];
3759
3760 /* use SIA by default; overwrite it if a valid frame ID is computed */
3761 sia_frame_id = TRB_SIA;
3762 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
3763 HCC_CFC(xhci->hcc_params)) {
3764 frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
3765 if (frame_id >= 0)
3766 sia_frame_id = TRB_FRAME_ID(frame_id);
3767 }
3768 /*
3769 * Set isoc specific data for the first TRB in a TD.
3770 * Prevent HW from getting the TRBs by keeping the cycle state
3771 * inverted in the first TD's isoc TRB.
3772 */
3773 field = TRB_TYPE(TRB_ISOC) |
3774 TRB_TLBPC(last_burst_pkt_count) |
3775 sia_frame_id |
3776 (i ? ep_ring->cycle_state : !start_cycle);
3777
3778 /* xhci 1.1 with ETE uses TD_Size field for TBC; older hosts keep it RsvdZ */
3779 if (!xep->use_extended_tbc)
3780 field |= TRB_TBC(burst_count);
3781
3782 /* fill the rest of the TRB fields, and remaining normal TRBs */
3783 for (j = 0; j < trbs_per_td; j++) {
3784 u32 remainder = 0;
3785
3786 /* only first TRB is isoc, overwrite otherwise */
3787 if (!first_trb)
3788 field = TRB_TYPE(TRB_NORMAL) |
3789 ep_ring->cycle_state;
3790
3791 /* Only set interrupt on short packet for IN EPs */
3792 if (usb_urb_dir_in(urb))
3793 field |= TRB_ISP;
3794
3795 /* Set the chain bit for all except the last TRB */
3796 if (j < trbs_per_td - 1) {
3797 more_trbs_coming = true;
3798 field |= TRB_CHAIN;
3799 } else {
3800 more_trbs_coming = false;
3801 td->last_trb = ep_ring->enqueue;
3802 field |= TRB_IOC;
3803 /* set BEI, except for the last TD */
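/* BEI posts the transfer event without raising an interrupt */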
3804 if (xhci->hci_version >= 0x100 &&
3805 !(xhci->quirks & XHCI_AVOID_BEI) &&
3806 i < num_tds - 1)
3807 field |= TRB_BEI;
3808 }
3809 /* Calculate TRB length */
3810 trb_buff_len = TRB_MAX_BUFF_SIZE -
3811 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3812 if (trb_buff_len > td_remain_len)
3813 trb_buff_len = td_remain_len;
3814
3815 /* Set the TRB length, TD size, & interrupter fields. */
3816 remainder = xhci_td_remainder(xhci, running_total,
3817 trb_buff_len, td_len,
3818 urb, trbs_per_td - j - 1);
3819
3820 length_field = TRB_LEN(trb_buff_len) |
3821 TRB_INTR_TARGET(0);
3822
3823 /* xhci 1.1 with ETE uses TD Size field for TBC */
3824 if (first_trb && xep->use_extended_tbc)
3825 length_field |= TRB_TD_SIZE_TBC(burst_count);
3826 else
3827 length_field |= TRB_TD_SIZE(remainder);
3828 first_trb = false;
3829
3830 queue_trb(xhci, ep_ring, more_trbs_coming,
3831 lower_32_bits(addr),
3832 upper_32_bits(addr),
3833 length_field,
3834 field);
3835 running_total += trb_buff_len;
3836
3837 addr += trb_buff_len;
3838 td_remain_len -= trb_buff_len;
3839 }
3840
3841 /* Check TD length */
3842 if (running_total != td_len) {
3843 xhci_err(xhci, "ISOC TD length mismatch\n");
3844 ret = -EINVAL;
3845 goto cleanup;
3846 }
3847 }
3848
3849 /* store the next frame id */
3850 if (HCC_CFC(xhci->hcc_params))
3851 xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
3852
3853 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3854 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3855 usb_amd_quirk_pll_disable();
3856 }
3857 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3858
3859 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3860 start_cycle, start_trb);
3861 return 0;
3862cleanup:
3863 /* Clean up a partially enqueued isoc transfer. */
3864
3865 for (i--; i >= 0; i--)
3866 list_del_init(&urb_priv->td[i]->td_list);
3867
3868 /* Use the first TD as a temporary variable to turn the TDs we've queued
3869 * into No-ops with a software-owned cycle bit. That way the hardware
3870 * won't accidentally start executing bogus TDs when we partially
3871 * overwrite them. td->first_trb and td->start_seg are already set.
3872 */
3873 urb_priv->td[0]->last_trb = ep_ring->enqueue;
3874 /* Every TRB except the first & last will have its cycle bit flipped. */
3875 td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3876
3877 /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3878 ep_ring->enqueue = urb_priv->td[0]->first_trb;
3879 ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3880 ep_ring->cycle_state = start_cycle;
3881 ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3882 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3883 return ret;
3884}
3885
3886/*
3887 * Check transfer ring to guarantee there is enough room for the urb.
3888 * Update ISO URB start_frame and interval.
3889 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
3890 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
3891 * Contiguous Frame ID is not supported by HC.
3892 */
3893int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3894 struct urb *urb, int slot_id, unsigned int ep_index)
3895{
3896 struct xhci_virt_device *xdev;
3897 struct xhci_ring *ep_ring;
3898 struct xhci_ep_ctx *ep_ctx;
3899 int start_frame;
3900 int xhci_interval;
3901 int ep_interval;
3902 int num_tds, num_trbs, i;
3903 int ret;
3904 struct xhci_virt_ep *xep;
3905 int ist;
3906
3907 xdev = xhci->devs[slot_id];
3908 xep = &xhci->devs[slot_id]->eps[ep_index];
3909 ep_ring = xdev->eps[ep_index].ring;
3910 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3911
3912 num_trbs = 0;
3913 num_tds = urb->number_of_packets;
3914 for (i = 0; i < num_tds; i++)
3915 num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3916
3917 /* Check the ring to guarantee there is enough room for the whole urb.
3918 * Do not insert any TD of the URB into the ring if the check fails.
3919 */
3920 ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3921 num_trbs, mem_flags);
3922 if (ret)
3923 return ret;
3924
3925 /*
3926 * Check interval value. This should be done before we start to
3927 * calculate the start frame value.
3928 */
3929 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3930 ep_interval = urb->interval;
3931 /* Convert to microframes */
3932 if (urb->dev->speed == USB_SPEED_LOW ||
3933 urb->dev->speed == USB_SPEED_FULL)
3934 ep_interval *= 8;
3935 /* FIXME change this to a warning and a suggestion to use the new API
3936 * to set the polling interval (once the API is added).
3937 */
3938 if (xhci_interval != ep_interval) {
3939 dev_dbg_ratelimited(&urb->dev->dev,
3940 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3941 ep_interval, ep_interval == 1 ? "" : "s",
3942 xhci_interval, xhci_interval == 1 ? "" : "s");
3943 urb->interval = xhci_interval;
3944 /* Convert back to frames for LS/FS devices */
3945 if (urb->dev->speed == USB_SPEED_LOW ||
3946 urb->dev->speed == USB_SPEED_FULL)
3947 urb->interval /= 8;
3948 }
3949
3950 /* Calculate the start frame and put it in urb->start_frame. */
3951 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
3952 if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
3953 EP_STATE_RUNNING) {
3954 urb->start_frame = xep->next_frame_id;
3955 goto skip_start_over;
3956 }
3957 }
3958
3959 start_frame = readl(&xhci->run_regs->microframe_index);
3960 start_frame &= 0x3fff;
3961 /*
3962 * Round up to the next frame and allow for the time before the TRB
3963 * really gets scheduled by the hardware.
3964 */
3965 ist = HCS_IST(xhci->hcs_params2) & 0x7;
3966 if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3967 ist <<= 3;
3968 start_frame += ist + XHCI_CFC_DELAY;
3969 start_frame = roundup(start_frame, 8);
3970
3971 /*
3972 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
3973 * is greater than 8 microframes.
3974 */
3975 if (urb->dev->speed == USB_SPEED_LOW ||
3976 urb->dev->speed == USB_SPEED_FULL) {
3977 start_frame = roundup(start_frame, urb->interval << 3);
3978 urb->start_frame = start_frame >> 3;
3979 } else {
3980 start_frame = roundup(start_frame, urb->interval);
3981 urb->start_frame = start_frame;
3982 }
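/* urb->start_frame is in frames for LS/FS devices, in microframes otherwise */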
3983
3984skip_start_over:
3985 ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3986
3987 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
3988}
3989
3990/**** Command Ring Operations ****/
3991
3992/* Generic function for queueing a command TRB on the command ring.
3993 * Check to make sure there's room on the command ring for one command TRB.
3994 * Also check that there's room reserved for commands that must not fail.
3995 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3996 * then only check for the number of reserved spots.
3997 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3998 * because the command event handler may want to resubmit a failed command.
3999 */
4000static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4001 u32 field1, u32 field2,
4002 u32 field3, u32 field4, bool command_must_succeed)
4003{
4004 int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4005 int ret;
4006
4007 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4008 (xhci->xhc_state & XHCI_STATE_HALTED)) {
4009 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4010 return -ESHUTDOWN;
4011 }
4012
4013 if (!command_must_succeed)
4014 reserved_trbs++;
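/* one extra free slot keeps ordinary commands out of the reserved TRBs */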
4015
4016 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
4017 reserved_trbs, GFP_ATOMIC);
4018 if (ret < 0) {
4019 xhci_err(xhci, "ERR: No room for command on command ring\n");
4020 if (command_must_succeed)
4021 xhci_err(xhci, "ERR: Reserved TRB counting for "
4022 "unfailable commands failed.\n");
4023 return ret;
4024 }
4025
4026 cmd->command_trb = xhci->cmd_ring->enqueue;
4027 list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
4028
4029 /* if there are no other commands queued we start the timeout timer */
4030 if (xhci->cmd_list.next == &cmd->cmd_list &&
4031 !timer_pending(&xhci->cmd_timer)) {
4032 xhci->current_cmd = cmd;
4033 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
4034 }
4035
4036 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
4037 field4 | xhci->cmd_ring->cycle_state);
4038 return 0;
4039}
4040
4041/* Queue a slot enable or disable request on the command ring */
4042int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
4043 u32 trb_type, u32 slot_id)
4044{
4045 return queue_command(xhci, cmd, 0, 0, 0,
4046 TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
4047}
4048
4049/* Queue an address device command TRB */
4050int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4051 dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
4052{
4053 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4054 upper_32_bits(in_ctx_ptr), 0,
4055 TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
4056 | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
4057}
4058
4059int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4060 u32 field1, u32 field2, u32 field3, u32 field4)
4061{
4062 return queue_command(xhci, cmd, field1, field2, field3, field4, false);
4063}
4064
4065/* Queue a reset device command TRB */
4066int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
4067 u32 slot_id)
4068{
4069 return queue_command(xhci, cmd, 0, 0, 0,
4070 TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
4071 false);
4072}
4073
4074/* Queue a configure endpoint command TRB */
4075int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
4076 struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
4077 u32 slot_id, bool command_must_succeed)
4078{
4079 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4080 upper_32_bits(in_ctx_ptr), 0,
4081 TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
4082 command_must_succeed);
4083}
4084
4085/* Queue an evaluate context command TRB */
4086int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
4087 dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
4088{
4089 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
4090 upper_32_bits(in_ctx_ptr), 0,
4091 TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
4092 command_must_succeed);
4093}
4094
4095/*
4096 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
4097 * activity on an endpoint that is about to be suspended.
4098 */
4099int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
4100 int slot_id, unsigned int ep_index, int suspend)
4101{
4102 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4103 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4104 u32 type = TRB_TYPE(TRB_STOP_RING);
4105 u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
4106
4107 return queue_command(xhci, cmd, 0, 0, 0,
4108 trb_slot_id | trb_ep_index | type | trb_suspend, false);
4109}
4110
4111/* Set Transfer Ring Dequeue Pointer command */
4112void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
4113 unsigned int slot_id, unsigned int ep_index,
4114 unsigned int stream_id,
4115 struct xhci_dequeue_state *deq_state)
4116{
4117 dma_addr_t addr;
4118 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4119 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4120 u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
4121 u32 trb_sct = 0;
4122 u32 type = TRB_TYPE(TRB_SET_DEQ);
4123 struct xhci_virt_ep *ep;
4124 struct xhci_command *cmd;
4125 int ret;
4126
4127 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
4128 "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
4129 deq_state->new_deq_seg,
4130 (unsigned long long)deq_state->new_deq_seg->dma,
4131 deq_state->new_deq_ptr,
4132 (unsigned long long)xhci_trb_virt_to_dma(
4133 deq_state->new_deq_seg, deq_state->new_deq_ptr),
4134 deq_state->new_cycle_state);
4135
4136 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
4137 deq_state->new_deq_ptr);
4138 if (addr == 0) {
4139 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4140 xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
4141 deq_state->new_deq_seg, deq_state->new_deq_ptr);
4142 return;
4143 }
4144 ep = &xhci->devs[slot_id]->eps[ep_index];
4145 if ((ep->ep_state & SET_DEQ_PENDING)) {
4146 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
4147 xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
4148 return;
4149 }
4150
4151 /* This function gets called from contexts where it cannot sleep */
4152 cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
4153 if (!cmd) {
4154 xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
4155 return;
4156 }
4157
4158 ep->queued_deq_seg = deq_state->new_deq_seg;
4159 ep->queued_deq_ptr = deq_state->new_deq_ptr;
4160 if (stream_id)
4161 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
4162 ret = queue_command(xhci, cmd,
4163 lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
4164 upper_32_bits(addr), trb_stream_id,
4165 trb_slot_id | trb_ep_index | type, false);
4166 if (ret < 0) {
4167 xhci_free_command(xhci, cmd);
4168 return;
4169 }
4170
4171 /* Stop the TD queueing code from ringing the doorbell until
4172 * this command completes. The HC won't set the dequeue pointer
4173 * if the ring is running, and ringing the doorbell starts the
4174 * ring running.
4175 */
4176 ep->ep_state |= SET_DEQ_PENDING;
4177}
4178
4179int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
4180 int slot_id, unsigned int ep_index)
4181{
4182 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
4183 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
4184 u32 type = TRB_TYPE(TRB_RESET_EP);
4185
4186 return queue_command(xhci, cmd, 0, 0, 0,
4187 trb_slot_id | trb_ep_index | type, false);
4188}