/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support: interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame(struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	/* case Q_TYPE_SITD: */
	default:
		return &periodic->sitd->sitd_next;
	}
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}

/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow *prev_p = &ehci->pshadow[frame];
	__hc32 *hw_p = &ehci->periodic[frame];
	union ehci_shadow here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

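	/*
	 * When the slot would otherwise become empty, controllers flagged
	 * with use_dummy_qh are pointed at an inert dummy QH rather than
	 * an end-of-list (T-bit) link -- reportedly some hardware
	 * prefetches periodic frame-list entries and mishandles empty ones.
	 */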
	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}

/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt *utt = udev->tt;
	struct ehci_tt *tt, **tt_index, **ptt;
	unsigned port;
	bool allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kzalloc(utt->hub->maxchild *
					sizeof(*tt_index), GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd *ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}

/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt *utt = udev->tt;
	struct ehci_tt *tt, **tt_index, **ptt;
	int cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}
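
/*
 * Lifetime note: an ehci_tt stays allocated as long as any ehci_per_sched
 * is on its ps_list.  drop_tt() frees it only once that list is empty,
 * and frees the multi-TT index array once its last slot is cleared.
 */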

static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
			ps->ep->desc.bEndpointAddress,
			(sign >= 0 ? "reserve" : "release"), type,
			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
			ps->phase, ps->phase_uf, ps->period,
			ps->usecs, ps->c_usecs, ps->cs_mask);
}

static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned start_uf;
	unsigned i, j, m;
	int usecs = qh->ps.usecs;
	int c_usecs = qh->ps.c_usecs;
	int tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt *tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		tt = find_tt(qh->ps.udev);
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
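
/*
 * Example (illustrative): a full/low-speed interrupt endpoint with
 * bw_uperiod 64 (one slot every 8 frames), phase_uf 1, and cs_mask 0x1c02
 * charges "usecs" to uframe 1 of each scheduled frame (the start-split)
 * and "c_usecs" to uframes 2..4 (C-mask bits 10..12, the complete-splits).
 */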

/*-------------------------------------------------------------------------*/

static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched *ps;
	unsigned uframe, uf, x;
	u8 *budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				}
				budget_line[uf] = 125;
				x -= 125;
			}
		}
	}
}
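
/*
 * Example (illustrative): a single endpoint with tt_usecs 300 and
 * phase_uf 0 yields a budget line of {125, 125, 50, 0, ...}: the TT's
 * downstream bus is busy for two full microframes plus 50 us of a third.
 */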

static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0. "B-frames" lag
 * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
	unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;

	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
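
/*
 * Example (illustrative): tt_usecs = {150, 0, ...} becomes {125, 25, ...}
 * after carryover.  Note that max_tt_usecs allows only 30 us in uframe 6
 * and none in uframe 7, presumably so periodic work finishes safely
 * before the end of the frame.
 */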

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe. Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms). For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available(
	struct ehci_hcd *ehci,
	struct ehci_per_sched *ps,
	struct ehci_tt *tt,
	unsigned frame,
	unsigned uframe
)
{
	unsigned period = ps->bw_period;
	unsigned usecs = ps->tt_usecs;

	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
			frame += period) {
		unsigned i, uf;
		unsigned short tt_usecs[8];

		if (tt->bandwidth[frame] + usecs > 900)
			return 0;

		uf = frame << 3;
		for (i = 0; i < 8; (++i, ++uf))
			tt_usecs[i] = ehci->tt_budget[uf];

		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
			return 0;

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (usecs > 125) {
			int ufs = (usecs / 125);

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (tt_usecs[i] > 0)
					return 0;
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7])
			return 0;
	}

	return 1;
}

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision(
	struct ehci_hcd *ehci,
	unsigned period,
	struct usb_device *dev,
	unsigned frame,
	u32 uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow here;
		__hc32 type;
		struct ehci_qh_hw *hw;

		here = ehci->pshadow[frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->ps.udev)) {
					u32 mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt(dev, here.sitd->urb->dev)) {
					u16 mask;

					mask = hc32_to_cpu(ehci, here.sitd
							->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			/* case Q_TYPE_FSTN: */
			default:
				ehci_dbg(ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
	if (ehci->periodic_count++)
		return;

	/* Stop waiting to turn off the periodic schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	ehci_poll_PSS(ehci);
	turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
	if (--ehci->periodic_count)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period = qh->ps.period;

	dev_dbg(&qh->ps.udev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
		union ehci_shadow *prev = &ehci->pshadow[i];
		__hc32 *hw_p = &ehci->periodic[i];
		union ehci_shadow here = *prev;
		__hc32 type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->ps.period > here.qh->ps.period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->unlink_reason = 0;

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	list_add(&qh->intr_node, &ehci->intr_qh_list);

	/* maybe enable periodic schedule processing */
	++ehci->intr_count;
	enable_periodic(ehci);
}

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction. To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this. Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->ps.period ? : 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
		periodic_unlink(ehci, i, qh);

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	dev_dbg(&qh->ps.udev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->ps.period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = list_entry(qh->intr_node.next,
				struct ehci_qh, intr_node);
	list_del(&qh->intr_node);
}

static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (qh->qh_state != QH_STATE_LINKED ||
			list_empty(&qh->unlink_node))
		return;

	list_del_init(&qh->unlink_node);

	/*
	 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
	 * avoiding unnecessary CPU wakeup
	 */
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	/* if the qh is waiting for unlink, cancel it now */
	cancel_unlink_wait_intr(ehci, qh);

	qh_unlink_periodic(ehci, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH. The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = ehci->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

	if (ehci->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_intr_unlinks(ehci);
	else if (ehci->intr_unlink.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
}

/*
 * It is common for only one intr URB to be scheduled on a qh, and
 * since complete() runs in tasklet context, introduce a short delay
 * before unlinking the qh, so that it isn't unlinked too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
				   struct ehci_qh *qh)
{
	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;

	/* New entries go at the end of the intr_unlink_wait list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

	if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_start_intr_unlinks(ehci);
	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh_hw *hw = qh->hw;
	int rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);

	if (!list_empty(&qh->qtd_list))
		qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		rc = qh_schedule(ehci, qh);
		if (rc == 0) {
			qh_refresh(ehci, qh);
			qh_link_periodic(ehci, qh);
		}

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule. Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		else {
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
		}
	}

	/* maybe turn off periodic schedule */
	--ehci->intr_count;
	disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period(
	struct ehci_hcd *ehci,
	unsigned frame,
	unsigned uframe,
	unsigned uperiod,
	unsigned usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - usecs;

	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}

	/* success! */
	return 1;
}
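
/*
 * Example (illustrative): with uframe_periodic_max at its default of
 * 100 us (80% of a microframe) and a request needing 30 us, the loop
 * above succeeds only if every affected uframe has at most 70 us
 * already claimed.
 */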

static int check_intr_schedule(
	struct ehci_hcd *ehci,
	unsigned frame,
	unsigned uframe,
	struct ehci_qh *qh,
	unsigned *c_maskp,
	struct ehci_tt *tt
)
{
	int retval = -ENOSPC;
	u8 mask = 0;

	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
		goto done;

	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
		goto done;
	if (!qh->ps.c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
			if (!check_period(ehci, frame, i,
					qh->ps.bw_uperiod, qh->ps.c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = mask;
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE: both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = mask;

	mask |= 1 << uframe;
	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		if (!check_period(ehci, frame, uframe + qh->gap_uf,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int status = 0;
	unsigned uframe;
	unsigned c_mask;
	struct ehci_qh_hw *hw = qh->hw;
	struct ehci_tt *tt;

	hw->hw_next = EHCI_LIST_END(ehci);

	/* reuse the previous schedule slots, if we can */
	if (qh->ps.phase != NO_FRAME) {
		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
		return 0;
	}

	uframe = 0;
	c_mask = 0;
	tt = find_tt(qh->ps.udev);
	if (IS_ERR(tt)) {
		status = PTR_ERR(tt);
		goto done;
	}
	compute_tt_budget(ehci->tt_budget, tt);

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	/* "normal" case, uframing flexible except with splits */
	if (qh->ps.bw_period) {
		int i;
		unsigned frame;

		for (i = qh->ps.bw_period; i > 0; --i) {
			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule(ehci,
						frame, uframe, qh, &c_mask, tt);
				if (status == 0)
					goto got_it;
			}
		}

	/* qh->ps.bw_period == 0 means every uframe */
	} else {
		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
	}
	if (status)
		goto done;

 got_it:
	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
			(qh->ps.period - 1) : 0);
	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
	qh->ps.phase_uf = uframe;
	qh->ps.cs_mask = qh->ps.period ?
			(c_mask << 8) | (1 << uframe) :
			QH_SMASK;

	/* reset S-frame and (maybe) C-frame masks */
	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
	reserve_release_intr_bandwidth(ehci, qh, 1);

done:
	return status;
}
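
/*
 * Example (illustrative): a split-transaction qh granted uframe 1 with
 * c_mask 0x1c ends up with cs_mask 0x1c02 -- the S-mask 0x02 (start-split
 * in uframe 1) in the low byte and the C-mask 0x1c (complete-splits in
 * uframes 2..4) shifted into the high byte, matching QH_SMASK/QH_CMASK.
 */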

static int intr_submit(
	struct ehci_hcd *ehci,
	struct urb *urb,
	struct list_head *qtd_list,
	gfp_t mem_flags
) {
	unsigned epnum;
	unsigned long flags;
	struct ehci_qh *qh;
	int status;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&ehci->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(ehci, qh);
		if (status)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* stuff into the periodic schedule */
	if (qh->qh_state == QH_STATE_IDLE) {
		qh_refresh(ehci, qh);
		qh_link_periodic(ehci, qh);
	} else {
		/* cancel unlink wait for the qh */
		cancel_unlink_wait_intr(ehci, qh);
	}

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
	if (status)
		qtd_list_free(ehci, urb, qtd_list);

	return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
	struct ehci_qh *qh;

	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
			intr_node) {

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock. That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp))
				start_unlink_intr(ehci, qh);
			else if (unlikely(list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED))
				start_unlink_intr_wait(ehci, qh);
		}
	}
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc(gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof(*stream), mem_flags);
	if (likely(stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = NO_FRAME;
		stream->ps.phase = NO_FRAME;
	}
	return stream;
}

static void
iso_stream_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	struct usb_device *dev = urb->dev;
	u32 buf1;
	unsigned epnum, maxp;
	int is_input;
	unsigned tmp;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint(urb->pipe);
	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
	maxp = usb_endpoint_maxp(&urb->ep->desc);
	buf1 = is_input ? 1 << 11 : 0;

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->ps.usecs = HS_USECS_ISO(maxp);

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);

		stream->uperiod = urb->interval;
		stream->ps.period = urb->interval >> 3;
		stream->bandwidth = stream->ps.usecs * 8 /
				stream->ps.bw_uperiod;

	} else {
		u32 addr;
		int think_time;
		int hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->ps.usecs = HS_USECS_ISO(maxp);
		think_time = dev->tt ? dev->tt->think_time : 0;
		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
				dev->speed, is_input, 1, maxp));
		hs_transfers = max(1u, (maxp + 187) / 188);
		if (is_input) {
			u32 tmp;

			addr |= 1 << 31;
			stream->ps.c_usecs = stream->ps.usecs;
			stream->ps.usecs = HS_USECS_ISO(1);
			stream->ps.cs_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->ps.cs_mask |= tmp << (8 + 2);
		} else
			stream->ps.cs_mask = smask_out[hs_transfers - 1];

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
		stream->ps.bw_uperiod = stream->ps.bw_period << 3;

		stream->ps.period = urb->interval;
		stream->uperiod = urb->interval << 3;
		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
				stream->ps.bw_period;

		/* stream->splits gets created from cs_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}

	stream->ps.udev = dev;
	stream->ps.ep = urb->ep;

	stream->bEndpointAddress = is_input | epnum;
	stream->maxp = maxp;
}
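
/*
 * Example (illustrative): a full-speed iso OUT endpoint with maxp 380
 * needs hs_transfers = (380 + 187) / 188 = 3 start-splits, so cs_mask
 * becomes smask_out[2] = 0x07: three consecutive uframes each carrying
 * up to 188 bytes across the TT.
 */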

static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned epnum;
	struct ehci_iso_stream *stream;
	struct usb_host_endpoint *ep;
	unsigned long flags;

	epnum = usb_pipeendpoint(urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave(&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely(stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely(stream != NULL)) {
			ep->hcpriv = stream;
			iso_stream_init(ehci, stream, urb);
		}

	/* if dev->ep [epnum] is a QH, hw is set */
	} else if (unlikely(stream->hw != NULL)) {
		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched *iso_sched;
	int size = sizeof(*iso_sched);

	size += packets * sizeof(struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely(iso_sched != NULL))
		INIT_LIST_HEAD(&iso_sched->td_list);

	return iso_sched;
}

static inline void
itd_sched_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->uperiod;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *uframe = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely(((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
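
/*
 * Layout of the transaction word built above (per the EHCI iTD format):
 * bits 11:0 hold the offset within the starting 4 KB page, bits 27:16
 * the transfer length, bit 31 the ACTIVE status, and bit 15 IOC (set
 * only for the final packet here).  The page-select field (bits 14:12)
 * is filled in later by itd_patch().
 */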

static void
iso_sched_free(
	struct ehci_iso_stream *stream,
	struct ehci_iso_sched *iso_sched
)
{
	if (!iso_sched)
		return;
	/* caller must hold ehci->lock! */
	list_splice(&iso_sched->td_list, &stream->free_list);
	kfree(iso_sched);
}

static int
itd_urb_transaction(
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_itd *itd;
	dma_addr_t itd_dma;
	int i;
	unsigned num_itds;
	struct ehci_iso_sched *sched;
	unsigned long flags;

	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (unlikely(sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del(&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
 alloc_itd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(itd, 0, sizeof(*itd));
		itd->itd_dma = itd_dma;
		itd->frame = NO_FRAME;
		list_add(&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore(&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{
	unsigned uframe;
	unsigned i, j;
	unsigned s_mask, c_mask, m;
	int usecs = stream->ps.usecs;
	int c_usecs = stream->ps.c_usecs;
	int tt_usecs = stream->ps.tt_usecs;
	struct ehci_tt *tt;

	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	uframe = stream->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "iso", &stream->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	if (!stream->splits) {		/* High speed */
		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod)
			ehci->bandwidth[i] += usecs;

	} else {		/* Full speed */
		s_mask = stream->ps.cs_mask;
		c_mask = s_mask >> 8;

		/* NOTE: adjustment needed for frame overflow */
		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod) {
			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
					(++j, m <<= 1)) {
				if (s_mask & m)
					ehci->bandwidth[i+j] += usecs;
				else if (c_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}

		tt = find_tt(stream->ps.udev);
		if (sign > 0)
			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
		else
			list_del(&stream->ps.ps_list);

		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += stream->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}

static inline int
itd_slot_ok(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	unsigned uframe
)
{
	unsigned usecs;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - stream->ps.usecs;

	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += stream->ps.bw_uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}
	return 1;
}

static inline int
sitd_slot_ok(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	unsigned uframe,
	struct ehci_iso_sched *sched,
	struct ehci_tt *tt
)
{
	unsigned mask, tmp;
	unsigned frame, uf;

	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, stream->ps.bw_period,
			stream->ps.udev, frame, mask))
		return 0;
#endif

	do {
		unsigned max_used;
		unsigned i;

		/* check starts (OUT uses more than one) */
		uf = uframe;
		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (ehci->bandwidth[uf] > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->ps.c_usecs) {
			max_used = ehci->uframe_periodic_max -
					stream->ps.c_usecs;
			uf = uframe & ~7;
			tmp = 1 << (2+8);
			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
				if ((stream->ps.cs_mask & tmp) == 0)
					continue;
				if (ehci->bandwidth[uf+i] > max_used)
					return 0;
			}
		}

		uframe += stream->ps.bw_uperiod;
	} while (uframe < EHCI_BANDWIDTH_SIZE);

	stream->ps.cs_mask <<= uframe & 7;
	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
	return 1;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.) That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
 */

static int
iso_stream_schedule(
	struct ehci_hcd *ehci,
	struct urb *urb,
	struct ehci_iso_stream *stream
)
{
	u32 now, base, next, start, period, span, now2;
	u32 wrap = 0, skip = 0;
	int status = 0;
	unsigned mod = ehci->periodic_size << 3;
	struct ehci_iso_sched *sched = urb->hcpriv;
	bool empty = list_empty(&stream->td_list);
	bool new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int done = 0;
			struct ehci_tt *tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base.  There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

 do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));

 use_start:
	/* Tried to schedule too far into the future? */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

 fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch(
	struct ehci_hcd *ehci,
	struct ehci_itd *itd,
	struct ehci_iso_sched *iso_sched,
	unsigned index,
	u16 uframe
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	unsigned pg = itd->pg;

	/* BUG_ON(pg == 6 && uf->cross); */

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64 bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
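
/*
 * Example (illustrative): a packet whose buffer starts 100 bytes before
 * a 4 KB boundary has uf->cross set by itd_sched_init(); itd_patch()
 * then advances itd->pg so the following page pointer covers the
 * remainder of the packet.
 */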
1735
1736static inline void
1737itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1738{
1739 union ehci_shadow *prev = &ehci->pshadow[frame];
1740 __hc32 *hw_p = &ehci->periodic[frame];
1741 union ehci_shadow here = *prev;
1742 __hc32 type = 0;
1743
1744 /* skip any iso nodes which might belong to previous microframes */
1745 while (here.ptr) {
1746 type = Q_NEXT_TYPE(ehci, *hw_p);
1747 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1748 break;
1749 prev = periodic_next_shadow(ehci, prev, type);
1750 hw_p = shadow_next_periodic(ehci, &here, type);
1751 here = *prev;
1752 }
1753
1754 itd->itd_next = here;
1755 itd->hw_next = *hw_p;
1756 prev->itd = itd;
1757 itd->frame = frame;
1758 wmb();
1759 *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1760}
1761
1762/* fit urb's itds into the selected schedule slot; activate as needed */
1763static void itd_link_urb(
1764 struct ehci_hcd *ehci,
1765 struct urb *urb,
1766 unsigned mod,
1767 struct ehci_iso_stream *stream
1768)
1769{
1770 int packet;
1771 unsigned next_uframe, uframe, frame;
1772 struct ehci_iso_sched *iso_sched = urb->hcpriv;
1773 struct ehci_itd *itd;
1774
1775 next_uframe = stream->next_uframe & (mod - 1);
1776
1777 if (unlikely(list_empty(&stream->td_list)))
1778 ehci_to_hcd(ehci)->self.bandwidth_allocated
1779 += stream->bandwidth;
1780
1781 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1782 if (ehci->amd_pll_fix == 1)
1783 usb_amd_quirk_pll_disable();
1784 }
1785
1786 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1787
1788 /* fill iTDs uframe by uframe */
1789 for (packet = iso_sched->first_packet, itd = NULL;
1790 packet < urb->number_of_packets;) {
1791 if (itd == NULL) {
1792 /* ASSERT: we have all necessary itds */
1793 /* BUG_ON(list_empty(&iso_sched->td_list)); */
1794
1795 /* ASSERT: no itds for this endpoint in this uframe */
1796
1797 itd = list_entry(iso_sched->td_list.next,
1798 struct ehci_itd, itd_list);
1799 list_move_tail(&itd->itd_list, &stream->td_list);
1800 itd->stream = stream;
1801 itd->urb = urb;
1802 itd_init(ehci, stream, itd);
1803 }
1804
1805 uframe = next_uframe & 0x07;
1806 frame = next_uframe >> 3;
1807
1808 itd_patch(ehci, itd, iso_sched, packet, uframe);
1809
1810 next_uframe += stream->uperiod;
1811 next_uframe &= mod - 1;
1812 packet++;
1813
1814 /* link completed itds into the schedule */
1815 if (((next_uframe >> 3) != frame)
1816 || packet == urb->number_of_packets) {
1817 itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1818 itd = NULL;
1819 }
1820 }
1821 stream->next_uframe = next_uframe;
1822
1823 /* don't need that schedule data any more */
1824 iso_sched_free(stream, iso_sched);
1825 urb->hcpriv = stream;
1826
1827 ++ehci->isoc_count;
1828 enable_periodic(ehci);
1829}
1830
1831#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1832
1833/* Process and recycle a completed ITD. Return true iff its urb completed,
1834 * and hence its completion callback probably added things to the hardware
1835 * schedule.
1836 *
1837 * Note that we carefully avoid recycling this descriptor until after any
1838 * completion callback runs, so that it won't be reused quickly. That is,
1839 * assuming (a) no more than two urbs per frame on this endpoint, and also
1840 * (b) only this endpoint's completions submit URBs. It seems some silicon
1841 * corrupts things if you reuse completed descriptors very quickly...
1842 */
1843static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1844{
1845 struct urb *urb = itd->urb;
1846 struct usb_iso_packet_descriptor *desc;
1847 u32 t;
1848 unsigned uframe;
1849 int urb_index = -1;
1850 struct ehci_iso_stream *stream = itd->stream;
1851 struct usb_device *dev;
1852 bool retval = false;
1853
1854 /* for each uframe with a packet */
1855 for (uframe = 0; uframe < 8; uframe++) {
1856 if (likely(itd->index[uframe] == -1))
1857 continue;
1858 urb_index = itd->index[uframe];
1859 desc = &urb->iso_frame_desc[urb_index];
1860
1861 t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
1862 itd->hw_transaction[uframe] = 0;
1863
1864 /* report transfer status */
1865 if (unlikely(t & ISO_ERRS)) {
1866 urb->error_count++;
1867 if (t & EHCI_ISOC_BUF_ERR)
1868 desc->status = usb_pipein(urb->pipe)
1869 ? -ENOSR /* hc couldn't read */
1870 : -ECOMM; /* hc couldn't write */
1871 else if (t & EHCI_ISOC_BABBLE)
1872 desc->status = -EOVERFLOW;
1873 else /* (t & EHCI_ISOC_XACTERR) */
1874 desc->status = -EPROTO;
1875
1876 /* HC need not update length with this error */
1877 if (!(t & EHCI_ISOC_BABBLE)) {
1878 desc->actual_length = EHCI_ITD_LENGTH(t);
1879 urb->actual_length += desc->actual_length;
1880 }
1881 } else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
1882 desc->status = 0;
1883 desc->actual_length = EHCI_ITD_LENGTH(t);
1884 urb->actual_length += desc->actual_length;
1885 } else {
1886 /* URB was too late */
1887 urb->error_count++;
1888 }
1889 }
1890
1891 /* handle completion now? */
1892 if (likely((urb_index + 1) != urb->number_of_packets))
1893 goto done;
1894
1895 /*
1896 * ASSERT: it's really the last itd for this urb
1897 * list_for_each_entry (itd, &stream->td_list, itd_list)
1898 * BUG_ON(itd->urb == urb);
1899 */
1900
1901 /* give urb back to the driver; completion often (re)submits */
1902 dev = urb->dev;
1903 ehci_urb_done(ehci, urb, 0);
1904 retval = true;
1905 urb = NULL;
1906
1907 --ehci->isoc_count;
1908 disable_periodic(ehci);
1909
1910 ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1911 if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1912 if (ehci->amd_pll_fix == 1)
1913 usb_amd_quirk_pll_enable();
1914 }
1915
1916 if (unlikely(list_is_singular(&stream->td_list)))
1917 ehci_to_hcd(ehci)->self.bandwidth_allocated
1918 -= stream->bandwidth;
1919
1920done:
1921 itd->urb = NULL;
1922
1923 /* Add to the end of the free list for later reuse */
1924 list_move_tail(&itd->itd_list, &stream->free_list);
1925
1926 /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1927 if (list_empty(&stream->td_list)) {
1928 list_splice_tail_init(&stream->free_list,
1929 &ehci->cached_itd_list);
1930 start_free_itds(ehci);
1931 }
1932
1933 return retval;
1934}
1935
1936/*-------------------------------------------------------------------------*/
1937
static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (unlikely(stream == NULL)) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
				stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
			"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
			__func__, urb->dev->devpath, urb,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->transfer_buffer_length,
			urb->number_of_packets, urb->interval,
			stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
	if (unlikely(status < 0)) {
		ehci_dbg(ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
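	/*
	 * A positive return below means the URB's scheduled window has
	 * entirely passed; it is given back right away (with error_count
	 * reporting the missed packets) instead of being linked.
	 */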
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */
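/*
 * Each full-speed iso packet maps onto one siTD.  The HC issues one or
 * more start-splits (SSPLIT) to hand the data to the hub's transaction
 * translator, and for IN transfers collects the results with
 * complete-splits (CSPLIT) a few microframes later; both sets of
 * microframes are encoded in the siTD's uframe masks.
 */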

static inline void
sitd_sched_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->ps.period;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *packet = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
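		/*
		 * A full-speed bus moves at most 187.5 bytes per 125 us
		 * microframe, so an OUT packet is sent as ceil(length/188)
		 * start-splits; e.g. a 600-byte packet needs 4 of them.
		 * The count lands in the T-count field below, and TP is
		 * switched from ALL to BEGIN when more than one SSPLIT
		 * is required.
		 */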
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}

static int
sitd_urb_transaction(
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_sitd *sitd;
	dma_addr_t sitd_dma;
	int i;
	struct ehci_iso_sched *iso_sched;
	unsigned long flags;

	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE: for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del(&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(sitd, 0, sizeof(*sitd));
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add(&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore(&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	struct ehci_sitd *sitd,
	struct ehci_iso_sched *iso_sched,
	unsigned index
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	u64 bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	bufp = uf->bufp;
	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

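	/*
	 * buf1's low bits (TP and T-count) were filled in by
	 * sitd_sched_init(); if the data runs into the following 4 KB
	 * page, the high dword of the second buffer pointer is advanced
	 * to that page.
	 */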
	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}
static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	wmb();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}

/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe;
	struct ehci_iso_sched *sched = urb->hcpriv;
	struct ehci_sitd *sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

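	/*
	 * First active iso stream: on platforms with the AMD PLL quirk,
	 * the USB PLL's power-saving mode is switched off for the duration
	 * of isochronous traffic; itd_complete()/sitd_complete() re-enable
	 * it once the last iso request finishes.
	 */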
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT: we have all necessary sitds */
		BUG_ON(list_empty(&sched->td_list));

		/* ASSERT: no itds for this endpoint in this frame */

		sitd = list_entry(sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail(&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free(stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
		| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb *urb = sitd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	int urb_index;
	struct ehci_iso_stream *stream = sitd->stream;
	struct usb_device *dev;
	bool retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc[urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein(urb->pipe)
				? -ENOSR	/* hc couldn't read */
				: -ECOMM;	/* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else	/* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
		desc->status = 0;
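		/*
		 * Unlike the iTD's length field, the siTD's byte count
		 * counts down: the HC decrements it as data moves, so
		 * SITD_LENGTH() yields the residue and the bytes actually
		 * transferred are the difference.
		 */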
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/*
	 * ASSERT: it's really the last sitd for this urb
	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
	 *	BUG_ON(sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	dev = urb->dev;
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}


static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (stream == NULL) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->ps.period) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
				stream->ps.period, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
			"submit %p dev%s ep%d%s-iso len %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg(ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned uf, now_frame, frame;
	unsigned fmask = ehci->periodic_size - 1;
	bool modified, live;
	union ehci_shadow q, *q_p;
	__hc32 type, *hw_p;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;

restart:
	/* Scan each element in frame's queue for completions */
	q_p = &ehci->pshadow[frame];
	hw_p = &ehci->periodic[frame];
	q.ptr = q_p->ptr;
	type = Q_NEXT_TYPE(ehci, *hw_p);
	modified = false;

	while (q.ptr != NULL) {
		switch (hc32_to_cpu(ehci, type)) {
		case Q_TYPE_ITD:
			/*
			 * If this ITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (frame == now_frame && live) {
				rmb();
				for (uf = 0; uf < 8; uf++) {
					if (q.itd->hw_transaction[uf] &
							ITD_ACTIVE(ehci))
						break;
				}
				if (uf < 8) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.itd->hw_next);
					q = *q_p;
					break;
				}
			}

			/*
			 * Take finished ITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.  HC won't cache the
			 * pointer for much longer, if at all.
			 */
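			/*
			 * Controllers flagged with use_dummy_qh cannot
			 * tolerate an empty ("terminate") frame-list
			 * entry, so an emptied slot is pointed at the
			 * inactive dummy QH instead.
			 */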
			*q_p = q.itd->itd_next;
			if (!ehci->use_dummy_qh ||
					q.itd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.itd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
			wmb();
			modified = itd_complete(ehci, q.itd);
			q = *q_p;
			break;
		case Q_TYPE_SITD:
			/*
			 * If this SITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
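			/*
			 * The frame just before the current one is checked
			 * as well: a split transaction issued near the end
			 * of that frame may still be executing its
			 * complete-splits after FRINDEX has rolled into
			 * the next frame.
			 */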
			if (((frame == now_frame) ||
					(((frame + 1) & fmask) == now_frame))
					&& live
					&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {

				q_p = &q.sitd->sitd_next;
				hw_p = &q.sitd->hw_next;
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				q = *q_p;
				break;
			}

			/*
			 * Take finished SITDs out of the schedule
			 * and process them:  recycle, maybe report
			 * URB completion.
			 */
			*q_p = q.sitd->sitd_next;
			if (!ehci->use_dummy_qh ||
					q.sitd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.sitd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
			wmb();
			modified = sitd_complete(ehci, q.sitd);
			q = *q_p;
			break;
		default:
			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
			/* BUG(); */
			/* FALL THROUGH */
		case Q_TYPE_QH:
		case Q_TYPE_FSTN:
			/* End of the iTDs and siTDs */
			q.ptr = NULL;
			break;
		}

		/* Assume completion callbacks modify the queue */
		if (unlikely(modified && ehci->isoc_count > 0))
			goto restart;
	}

	/* Stop when we have reached the current frame */
	if (frame == now_frame)
		return;

	/* The last frame may still have active siTDs */
	ehci->last_iso_frame = frame;
	frame = (frame + 1) & fmask;

	goto restart;
}
96/*-------------------------------------------------------------------------*/
97
98/* Bandwidth and TT management */
99
100/* Find the TT data structure for this device; create it if necessary */
101static struct ehci_tt *find_tt(struct usb_device *udev)
102{
103 struct usb_tt *utt = udev->tt;
104 struct ehci_tt *tt, **tt_index, **ptt;
105 unsigned port;
106 bool allocated_index = false;
107
108 if (!utt)
109 return NULL; /* Not below a TT */
110
111 /*
112 * Find/create our data structure.
113 * For hubs with a single TT, we get it directly.
114 * For hubs with multiple TTs, there's an extra level of pointers.
115 */
116 tt_index = NULL;
117 if (utt->multi) {
118 tt_index = utt->hcpriv;
119 if (!tt_index) { /* Create the index array */
120 tt_index = kcalloc(utt->hub->maxchild,
121 sizeof(*tt_index),
122 GFP_ATOMIC);
123 if (!tt_index)
124 return ERR_PTR(-ENOMEM);
125 utt->hcpriv = tt_index;
126 allocated_index = true;
127 }
128 port = udev->ttport - 1;
129 ptt = &tt_index[port];
130 } else {
131 port = 0;
132 ptt = (struct ehci_tt **) &utt->hcpriv;
133 }
134
135 tt = *ptt;
136 if (!tt) { /* Create the ehci_tt */
137 struct ehci_hcd *ehci =
138 hcd_to_ehci(bus_to_hcd(udev->bus));
139
140 tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
141 if (!tt) {
142 if (allocated_index) {
143 utt->hcpriv = NULL;
144 kfree(tt_index);
145 }
146 return ERR_PTR(-ENOMEM);
147 }
148 list_add_tail(&tt->tt_list, &ehci->tt_list);
149 INIT_LIST_HEAD(&tt->ps_list);
150 tt->usb_tt = utt;
151 tt->tt_port = port;
152 *ptt = tt;
153 }
154
155 return tt;
156}
157
158/* Release the TT above udev, if it's not in use */
159static void drop_tt(struct usb_device *udev)
160{
161 struct usb_tt *utt = udev->tt;
162 struct ehci_tt *tt, **tt_index, **ptt;
163 int cnt, i;
164
165 if (!utt || !utt->hcpriv)
166 return; /* Not below a TT, or never allocated */
167
168 cnt = 0;
169 if (utt->multi) {
170 tt_index = utt->hcpriv;
171 ptt = &tt_index[udev->ttport - 1];
172
173 /* How many entries are left in tt_index? */
174 for (i = 0; i < utt->hub->maxchild; ++i)
175 cnt += !!tt_index[i];
176 } else {
177 tt_index = NULL;
178 ptt = (struct ehci_tt **) &utt->hcpriv;
179 }
180
181 tt = *ptt;
182 if (!tt || !list_empty(&tt->ps_list))
183 return; /* never allocated, or still in use */
184
185 list_del(&tt->tt_list);
186 *ptt = NULL;
187 kfree(tt);
188 if (cnt == 1) {
189 utt->hcpriv = NULL;
190 kfree(tt_index);
191 }
192}
193
194static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
195 struct ehci_per_sched *ps)
196{
197 dev_dbg(&ps->udev->dev,
198 "ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
199 ps->ep->desc.bEndpointAddress,
200 (sign >= 0 ? "reserve" : "release"), type,
201 (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
202 ps->phase, ps->phase_uf, ps->period,
203 ps->usecs, ps->c_usecs, ps->cs_mask);
204}
205
206static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
207 struct ehci_qh *qh, int sign)
208{
209 unsigned start_uf;
210 unsigned i, j, m;
211 int usecs = qh->ps.usecs;
212 int c_usecs = qh->ps.c_usecs;
213 int tt_usecs = qh->ps.tt_usecs;
214 struct ehci_tt *tt;
215
216 if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
217 return;
218 start_uf = qh->ps.bw_phase << 3;
219
220 bandwidth_dbg(ehci, sign, "intr", &qh->ps);
221
222 if (sign < 0) { /* Release bandwidth */
223 usecs = -usecs;
224 c_usecs = -c_usecs;
225 tt_usecs = -tt_usecs;
226 }
227
228 /* Entire transaction (high speed) or start-split (full/low speed) */
229 for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
230 i += qh->ps.bw_uperiod)
231 ehci->bandwidth[i] += usecs;
232
233 /* Complete-split (full/low speed) */
234 if (qh->ps.c_usecs) {
235 /* NOTE: adjustments needed for FSTN */
236 for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
237 i += qh->ps.bw_uperiod) {
238 for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
239 if (qh->ps.cs_mask & m)
240 ehci->bandwidth[i+j] += c_usecs;
241 }
242 }
243 }
244
245 /* FS/LS bus bandwidth */
246 if (tt_usecs) {
247 /*
248 * find_tt() will not return any error here as we have
249 * already called find_tt() before calling this function
250 * and checked for any error return. The previous call
251 * would have created the data structure.
252 */
253 tt = find_tt(qh->ps.udev);
254 if (sign > 0)
255 list_add_tail(&qh->ps.ps_list, &tt->ps_list);
256 else
257 list_del(&qh->ps.ps_list);
258
259 for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
260 i += qh->ps.bw_period)
261 tt->bandwidth[i] += tt_usecs;
262 }
263}
264
265/*-------------------------------------------------------------------------*/
266
267static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
268 struct ehci_tt *tt)
269{
270 struct ehci_per_sched *ps;
271 unsigned uframe, uf, x;
272 u8 *budget_line;
273
274 if (!tt)
275 return;
276 memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
277
278 /* Add up the contributions from all the endpoints using this TT */
279 list_for_each_entry(ps, &tt->ps_list, ps_list) {
280 for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
281 uframe += ps->bw_uperiod) {
282 budget_line = &budget_table[uframe];
283 x = ps->tt_usecs;
284
285 /* propagate the time forward */
286 for (uf = ps->phase_uf; uf < 8; ++uf) {
287 x += budget_line[uf];
288
289 /* Each microframe lasts 125 us */
290 if (x <= 125) {
291 budget_line[uf] = x;
292 break;
293 }
294 budget_line[uf] = 125;
295 x -= 125;
296 }
297 }
298 }
299}
300
301static int __maybe_unused same_tt(struct usb_device *dev1,
302 struct usb_device *dev2)
303{
304 if (!dev1->tt || !dev2->tt)
305 return 0;
306 if (dev1->tt != dev2->tt)
307 return 0;
308 if (dev1->tt->multi)
309 return dev1->ttport == dev2->ttport;
310 else
311 return 1;
312}
313
314#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
315
316static const unsigned char
317max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
318
319/* carryover low/fullspeed bandwidth that crosses uframe boundries */
320static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
321{
322 int i;
323
324 for (i = 0; i < 7; i++) {
325 if (max_tt_usecs[i] < tt_usecs[i]) {
326 tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
327 tt_usecs[i] = max_tt_usecs[i];
328 }
329 }
330}
331
332/*
333 * Return true if the device's tt's downstream bus is available for a
334 * periodic transfer of the specified length (usecs), starting at the
335 * specified frame/uframe. Note that (as summarized in section 11.19
336 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
337 * uframe.
338 *
339 * The uframe parameter is when the fullspeed/lowspeed transfer
340 * should be executed in "B-frame" terms, which is the same as the
341 * highspeed ssplit's uframe (which is in "H-frame" terms). For example
342 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
343 * See the EHCI spec sec 4.5 and fig 4.7.
344 *
345 * This checks if the full/lowspeed bus, at the specified starting uframe,
346 * has the specified bandwidth available, according to rules listed
347 * in USB 2.0 spec section 11.18.1 fig 11-60.
348 *
349 * This does not check if the transfer would exceed the max ssplit
350 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
351 * since proper scheduling limits ssplits to less than 16 per uframe.
352 */
353static int tt_available(
354 struct ehci_hcd *ehci,
355 struct ehci_per_sched *ps,
356 struct ehci_tt *tt,
357 unsigned frame,
358 unsigned uframe
359)
360{
361 unsigned period = ps->bw_period;
362 unsigned usecs = ps->tt_usecs;
363
364 if ((period == 0) || (uframe >= 7)) /* error */
365 return 0;
366
367 for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
368 frame += period) {
369 unsigned i, uf;
370 unsigned short tt_usecs[8];
371
372 if (tt->bandwidth[frame] + usecs > 900)
373 return 0;
374
375 uf = frame << 3;
376 for (i = 0; i < 8; (++i, ++uf))
377 tt_usecs[i] = ehci->tt_budget[uf];
378
379 if (max_tt_usecs[uframe] <= tt_usecs[uframe])
380 return 0;
381
382 /* special case for isoc transfers larger than 125us:
383 * the first and each subsequent fully used uframe
384 * must be empty, so as to not illegally delay
385 * already scheduled transactions
386 */
387 if (usecs > 125) {
388 int ufs = (usecs / 125);
389
390 for (i = uframe; i < (uframe + ufs) && i < 8; i++)
391 if (tt_usecs[i] > 0)
392 return 0;
393 }
394
395 tt_usecs[uframe] += usecs;
396
397 carryover_tt_bandwidth(tt_usecs);
398
399 /* fail if the carryover pushed bw past the last uframe's limit */
400 if (max_tt_usecs[7] < tt_usecs[7])
401 return 0;
402 }
403
404 return 1;
405}
406
407#else
408
409/* return true iff the device's transaction translator is available
410 * for a periodic transfer starting at the specified frame, using
411 * all the uframes in the mask.
412 */
413static int tt_no_collision(
414 struct ehci_hcd *ehci,
415 unsigned period,
416 struct usb_device *dev,
417 unsigned frame,
418 u32 uf_mask
419)
420{
421 if (period == 0) /* error */
422 return 0;
423
424 /* note bandwidth wastage: split never follows csplit
425 * (different dev or endpoint) until the next uframe.
426 * calling convention doesn't make that distinction.
427 */
428 for (; frame < ehci->periodic_size; frame += period) {
429 union ehci_shadow here;
430 __hc32 type;
431 struct ehci_qh_hw *hw;
432
433 here = ehci->pshadow[frame];
434 type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
435 while (here.ptr) {
436 switch (hc32_to_cpu(ehci, type)) {
437 case Q_TYPE_ITD:
438 type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
439 here = here.itd->itd_next;
440 continue;
441 case Q_TYPE_QH:
442 hw = here.qh->hw;
443 if (same_tt(dev, here.qh->ps.udev)) {
444 u32 mask;
445
446 mask = hc32_to_cpu(ehci,
447 hw->hw_info2);
448 /* "knows" no gap is needed */
449 mask |= mask >> 8;
450 if (mask & uf_mask)
451 break;
452 }
453 type = Q_NEXT_TYPE(ehci, hw->hw_next);
454 here = here.qh->qh_next;
455 continue;
456 case Q_TYPE_SITD:
457 if (same_tt(dev, here.sitd->urb->dev)) {
458 u16 mask;
459
460 mask = hc32_to_cpu(ehci, here.sitd
461 ->hw_uframe);
462 /* FIXME assumes no gap for IN! */
463 mask |= mask >> 8;
464 if (mask & uf_mask)
465 break;
466 }
467 type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
468 here = here.sitd->sitd_next;
469 continue;
470 /* case Q_TYPE_FSTN: */
471 default:
472 ehci_dbg(ehci,
473 "periodic frame %d bogus type %d\n",
474 frame, type);
475 }
476
477 /* collision or error */
478 return 0;
479 }
480 }
481
482 /* no collision */
483 return 1;
484}
485
486#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
487
488/*-------------------------------------------------------------------------*/
489
490static void enable_periodic(struct ehci_hcd *ehci)
491{
492 if (ehci->periodic_count++)
493 return;
494
495 /* Stop waiting to turn off the periodic schedule */
496 ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
497
498 /* Don't start the schedule until PSS is 0 */
499 ehci_poll_PSS(ehci);
500 turn_on_io_watchdog(ehci);
501}
502
503static void disable_periodic(struct ehci_hcd *ehci)
504{
505 if (--ehci->periodic_count)
506 return;
507
508 /* Don't turn off the schedule until PSS is 1 */
509 ehci_poll_PSS(ehci);
510}
511
512/*-------------------------------------------------------------------------*/
513
514/* periodic schedule slots have iso tds (normal or split) first, then a
515 * sparse tree for active interrupt transfers.
516 *
517 * this just links in a qh; caller guarantees uframe masks are set right.
518 * no FSTN support (yet; ehci 0.96+)
519 */
520static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
521{
522 unsigned i;
523 unsigned period = qh->ps.period;
524
525 dev_dbg(&qh->ps.udev->dev,
526 "link qh%d-%04x/%p start %d [%d/%d us]\n",
527 period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
528 & (QH_CMASK | QH_SMASK),
529 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
530
531 /* high bandwidth, or otherwise every microframe */
532 if (period == 0)
533 period = 1;
534
535 for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
536 union ehci_shadow *prev = &ehci->pshadow[i];
537 __hc32 *hw_p = &ehci->periodic[i];
538 union ehci_shadow here = *prev;
539 __hc32 type = 0;
540
541 /* skip the iso nodes at list head */
542 while (here.ptr) {
543 type = Q_NEXT_TYPE(ehci, *hw_p);
544 if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
545 break;
546 prev = periodic_next_shadow(ehci, prev, type);
547 hw_p = shadow_next_periodic(ehci, &here, type);
548 here = *prev;
549 }
550
551 /* sorting each branch by period (slow-->fast)
552 * enables sharing interior tree nodes
553 */
554 while (here.ptr && qh != here.qh) {
555 if (qh->ps.period > here.qh->ps.period)
556 break;
557 prev = &here.qh->qh_next;
558 hw_p = &here.qh->hw->hw_next;
559 here = *prev;
560 }
561 /* link in this qh, unless some earlier pass did that */
562 if (qh != here.qh) {
563 qh->qh_next = here;
564 if (here.qh)
565 qh->hw->hw_next = *hw_p;
566 wmb();
567 prev->qh = qh;
568 *hw_p = QH_NEXT(ehci, qh->qh_dma);
569 }
570 }
571 qh->qh_state = QH_STATE_LINKED;
572 qh->xacterrs = 0;
573 qh->unlink_reason = 0;
574
575 /* update per-qh bandwidth for debugfs */
576 ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
577 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
578 : (qh->ps.usecs * 8);
579
580 list_add(&qh->intr_node, &ehci->intr_qh_list);
581
582 /* maybe enable periodic schedule processing */
583 ++ehci->intr_count;
584 enable_periodic(ehci);
585}
586
587static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
588{
589 unsigned i;
590 unsigned period;
591
592 /*
593 * If qh is for a low/full-speed device, simply unlinking it
594 * could interfere with an ongoing split transaction. To unlink
595 * it safely would require setting the QH_INACTIVATE bit and
596 * waiting at least one frame, as described in EHCI 4.12.2.5.
597 *
598 * We won't bother with any of this. Instead, we assume that the
599 * only reason for unlinking an interrupt QH while the current URB
600 * is still active is to dequeue all the URBs (flush the whole
601 * endpoint queue).
602 *
603 * If rebalancing the periodic schedule is ever implemented, this
604 * approach will no longer be valid.
605 */
606
607 /* high bandwidth, or otherwise part of every microframe */
608 period = qh->ps.period ? : 1;
609
610 for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
611 periodic_unlink(ehci, i, qh);
612
613 /* update per-qh bandwidth for debugfs */
614 ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
615 ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
616 : (qh->ps.usecs * 8);
617
618 dev_dbg(&qh->ps.udev->dev,
619 "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
620 qh->ps.period,
621 hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
622 qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
623
624 /* qh->qh_next still "live" to HC */
625 qh->qh_state = QH_STATE_UNLINK;
626 qh->qh_next.ptr = NULL;
627
628 if (ehci->qh_scan_next == qh)
629 ehci->qh_scan_next = list_entry(qh->intr_node.next,
630 struct ehci_qh, intr_node);
631 list_del(&qh->intr_node);
632}
633
634static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
635{
636 if (qh->qh_state != QH_STATE_LINKED ||
637 list_empty(&qh->unlink_node))
638 return;
639
640 list_del_init(&qh->unlink_node);
641
642 /*
643 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
644 * avoiding unnecessary CPU wakeup
645 */
646}
647
648static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
649{
650 /* If the QH isn't linked then there's nothing we can do. */
651 if (qh->qh_state != QH_STATE_LINKED)
652 return;
653
654 /* if the qh is waiting for unlink, cancel it now */
655 cancel_unlink_wait_intr(ehci, qh);
656
657 qh_unlink_periodic(ehci, qh);
658
659 /* Make sure the unlinks are visible before starting the timer */
660 wmb();
661
662 /*
663 * The EHCI spec doesn't say how long it takes the controller to
664 * stop accessing an unlinked interrupt QH. The timer delay is
665 * 9 uframes; presumably that will be long enough.
666 */
667 qh->unlink_cycle = ehci->intr_unlink_cycle;
668
669 /* New entries go at the end of the intr_unlink list */
670 list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
671
672 if (ehci->intr_unlinking)
673 ; /* Avoid recursive calls */
674 else if (ehci->rh_state < EHCI_RH_RUNNING)
675 ehci_handle_intr_unlinks(ehci);
676 else if (ehci->intr_unlink.next == &qh->unlink_node) {
677 ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
678 ++ehci->intr_unlink_cycle;
679 }
680}
681
682/*
683 * It is common only one intr URB is scheduled on one qh, and
684 * given complete() is run in tasklet context, introduce a bit
685 * delay to avoid unlink qh too early.
686 */
687static void start_unlink_intr_wait(struct ehci_hcd *ehci,
688 struct ehci_qh *qh)
689{
690 qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
691
692 /* New entries go at the end of the intr_unlink_wait list */
693 list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
694
695 if (ehci->rh_state < EHCI_RH_RUNNING)
696 ehci_handle_start_intr_unlinks(ehci);
697 else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
698 ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
699 ++ehci->intr_unlink_wait_cycle;
700 }
701}
702
703static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
704{
705 struct ehci_qh_hw *hw = qh->hw;
706 int rc;
707
708 qh->qh_state = QH_STATE_IDLE;
709 hw->hw_next = EHCI_LIST_END(ehci);
710
711 if (!list_empty(&qh->qtd_list))
712 qh_completions(ehci, qh);
713
714 /* reschedule QH iff another request is queued */
715 if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
716 rc = qh_schedule(ehci, qh);
717 if (rc == 0) {
718 qh_refresh(ehci, qh);
719 qh_link_periodic(ehci, qh);
720 }
721
722 /* An error here likely indicates handshake failure
723 * or no space left in the schedule. Neither fault
724 * should happen often ...
725 *
726 * FIXME kill the now-dysfunctional queued urbs
727 */
728 else {
729 ehci_err(ehci, "can't reschedule qh %p, err %d\n",
730 qh, rc);
731 }
732 }
733
734 /* maybe turn off periodic schedule */
735 --ehci->intr_count;
736 disable_periodic(ehci);
737}
738
739/*-------------------------------------------------------------------------*/
740
741static int check_period(
742 struct ehci_hcd *ehci,
743 unsigned frame,
744 unsigned uframe,
745 unsigned uperiod,
746 unsigned usecs
747) {
748 /* complete split running into next frame?
749 * given FSTN support, we could sometimes check...
750 */
751 if (uframe >= 8)
752 return 0;
753
754 /* convert "usecs we need" to "max already claimed" */
755 usecs = ehci->uframe_periodic_max - usecs;
756
757 for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
758 uframe += uperiod) {
759 if (ehci->bandwidth[uframe] > usecs)
760 return 0;
761 }
762
763 /* success! */
764 return 1;
765}
766
767static int check_intr_schedule(
768 struct ehci_hcd *ehci,
769 unsigned frame,
770 unsigned uframe,
771 struct ehci_qh *qh,
772 unsigned *c_maskp,
773 struct ehci_tt *tt
774)
775{
776 int retval = -ENOSPC;
777 u8 mask = 0;
778
779 if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */
780 goto done;
781
782 if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
783 goto done;
784 if (!qh->ps.c_usecs) {
785 retval = 0;
786 *c_maskp = 0;
787 goto done;
788 }
789
790#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
791 if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
792 unsigned i;
793
794 /* TODO : this may need FSTN for SSPLIT in uframe 5. */
795 for (i = uframe+2; i < 8 && i <= uframe+4; i++)
796 if (!check_period(ehci, frame, i,
797 qh->ps.bw_uperiod, qh->ps.c_usecs))
798 goto done;
799 else
800 mask |= 1 << i;
801
802 retval = 0;
803
804 *c_maskp = mask;
805 }
806#else
807 /* Make sure this tt's buffer is also available for CSPLITs.
808 * We pessimize a bit; probably the typical full speed case
809 * doesn't need the second CSPLIT.
810 *
811 * NOTE: both SPLIT and CSPLIT could be checked in just
812 * one smart pass...
813 */
814 mask = 0x03 << (uframe + qh->gap_uf);
815 *c_maskp = mask;
816
817 mask |= 1 << uframe;
818 if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
819 if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
820 qh->ps.bw_uperiod, qh->ps.c_usecs))
821 goto done;
822 if (!check_period(ehci, frame, uframe + qh->gap_uf,
823 qh->ps.bw_uperiod, qh->ps.c_usecs))
824 goto done;
825 retval = 0;
826 }
827#endif
828done:
829 return retval;
830}
831
832/* "first fit" scheduling policy used the first time through,
833 * or when the previous schedule slot can't be re-used.
834 */
835static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
836{
837 int status = 0;
838 unsigned uframe;
839 unsigned c_mask;
840 struct ehci_qh_hw *hw = qh->hw;
841 struct ehci_tt *tt;
842
843 hw->hw_next = EHCI_LIST_END(ehci);
844
845 /* reuse the previous schedule slots, if we can */
846 if (qh->ps.phase != NO_FRAME) {
847 ehci_dbg(ehci, "reused qh %p schedule\n", qh);
848 return 0;
849 }
850
851 uframe = 0;
852 c_mask = 0;
853 tt = find_tt(qh->ps.udev);
854 if (IS_ERR(tt)) {
855 status = PTR_ERR(tt);
856 goto done;
857 }
858 compute_tt_budget(ehci->tt_budget, tt);
859
860 /* else scan the schedule to find a group of slots such that all
861 * uframes have enough periodic bandwidth available.
862 */
863 /* "normal" case, uframing flexible except with splits */
864 if (qh->ps.bw_period) {
865 int i;
866 unsigned frame;
867
868 for (i = qh->ps.bw_period; i > 0; --i) {
869 frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
870 for (uframe = 0; uframe < 8; uframe++) {
871 status = check_intr_schedule(ehci,
872 frame, uframe, qh, &c_mask, tt);
873 if (status == 0)
874 goto got_it;
875 }
876 }
877
878 /* qh->ps.bw_period == 0 means every uframe */
879 } else {
880 status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
881 }
882 if (status)
883 goto done;
884
885 got_it:
886 qh->ps.phase = (qh->ps.period ? ehci->random_frame &
887 (qh->ps.period - 1) : 0);
888 qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
889 qh->ps.phase_uf = uframe;
890 qh->ps.cs_mask = qh->ps.period ?
891 (c_mask << 8) | (1 << uframe) :
892 QH_SMASK;
893
894 /* reset S-frame and (maybe) C-frame masks */
895 hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
896 hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
897 reserve_release_intr_bandwidth(ehci, qh, 1);
898
899done:
900 return status;
901}
902
903static int intr_submit(
904 struct ehci_hcd *ehci,
905 struct urb *urb,
906 struct list_head *qtd_list,
907 gfp_t mem_flags
908) {
909 unsigned epnum;
910 unsigned long flags;
911 struct ehci_qh *qh;
912 int status;
913 struct list_head empty;
914
915 /* get endpoint and transfer/schedule data */
916 epnum = urb->ep->desc.bEndpointAddress;
917
918 spin_lock_irqsave(&ehci->lock, flags);
919
920 if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
921 status = -ESHUTDOWN;
922 goto done_not_linked;
923 }
924 status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
925 if (unlikely(status))
926 goto done_not_linked;
927
928 /* get qh and force any scheduling errors */
929 INIT_LIST_HEAD(&empty);
930 qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
931 if (qh == NULL) {
932 status = -ENOMEM;
933 goto done;
934 }
935 if (qh->qh_state == QH_STATE_IDLE) {
936 status = qh_schedule(ehci, qh);
937 if (status)
938 goto done;
939 }
940
941 /* then queue the urb's tds to the qh */
942 qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
943 BUG_ON(qh == NULL);
944
945 /* stuff into the periodic schedule */
946 if (qh->qh_state == QH_STATE_IDLE) {
947 qh_refresh(ehci, qh);
948 qh_link_periodic(ehci, qh);
949 } else {
950 /* cancel unlink wait for the qh */
951 cancel_unlink_wait_intr(ehci, qh);
952 }
953
954 /* ... update usbfs periodic stats */
955 ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
956
957done:
958 if (unlikely(status))
959 usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
960done_not_linked:
961 spin_unlock_irqrestore(&ehci->lock, flags);
962 if (status)
963 qtd_list_free(ehci, urb, qtd_list);
964
965 return status;
966}
967
968static void scan_intr(struct ehci_hcd *ehci)
969{
970 struct ehci_qh *qh;
971
972 list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
973 intr_node) {
974
975 /* clean any finished work for this qh */
976 if (!list_empty(&qh->qtd_list)) {
977 int temp;
978
979 /*
980 * Unlinks could happen here; completion reporting
981 * drops the lock. That's why ehci->qh_scan_next
982 * always holds the next qh to scan; if the next qh
983 * gets unlinked then ehci->qh_scan_next is adjusted
984 * in qh_unlink_periodic().
985 */
986 temp = qh_completions(ehci, qh);
987 if (unlikely(temp))
988 start_unlink_intr(ehci, qh);
989 else if (unlikely(list_empty(&qh->qtd_list) &&
990 qh->qh_state == QH_STATE_LINKED))
991 start_unlink_intr_wait(ehci, qh);
992 }
993 }
994}
995
996/*-------------------------------------------------------------------------*/
997
998/* ehci_iso_stream ops work with both ITD and SITD */
999
1000static struct ehci_iso_stream *
1001iso_stream_alloc(gfp_t mem_flags)
1002{
1003 struct ehci_iso_stream *stream;
1004
1005 stream = kzalloc(sizeof(*stream), mem_flags);
1006 if (likely(stream != NULL)) {
1007 INIT_LIST_HEAD(&stream->td_list);
1008 INIT_LIST_HEAD(&stream->free_list);
1009 stream->next_uframe = NO_FRAME;
1010 stream->ps.phase = NO_FRAME;
1011 }
1012 return stream;
1013}
1014
1015static void
1016iso_stream_init(
1017 struct ehci_hcd *ehci,
1018 struct ehci_iso_stream *stream,
1019 struct urb *urb
1020)
1021{
1022 static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
1023
1024 struct usb_device *dev = urb->dev;
1025 u32 buf1;
1026 unsigned epnum, maxp;
1027 int is_input;
1028 unsigned tmp;
1029
1030 /*
1031 * this might be a "high bandwidth" highspeed endpoint,
1032 * as encoded in the ep descriptor's wMaxPacket field
1033 */
1034 epnum = usb_pipeendpoint(urb->pipe);
1035 is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
1036 maxp = usb_endpoint_maxp(&urb->ep->desc);
1037 buf1 = is_input ? 1 << 11 : 0;
1038
1039 /* knows about ITD vs SITD */
1040 if (dev->speed == USB_SPEED_HIGH) {
1041 unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
1042
1043 stream->highspeed = 1;
1044
1045 buf1 |= maxp;
1046 maxp *= multi;
1047
1048 stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
1049 stream->buf1 = cpu_to_hc32(ehci, buf1);
1050 stream->buf2 = cpu_to_hc32(ehci, multi);
1051
1052 /* usbfs wants to report the average usecs per frame tied up
1053 * when transfers on this endpoint are scheduled ...
1054 */
1055 stream->ps.usecs = HS_USECS_ISO(maxp);
1056
1057 /* period for bandwidth allocation */
1058 tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
1059 1 << (urb->ep->desc.bInterval - 1));
1060
1061 /* Allow urb->interval to override */
1062 stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
1063
1064 stream->uperiod = urb->interval;
1065 stream->ps.period = urb->interval >> 3;
1066 stream->bandwidth = stream->ps.usecs * 8 /
1067 stream->ps.bw_uperiod;
1068
1069 } else {
1070 u32 addr;
1071 int think_time;
1072 int hs_transfers;
1073
1074 addr = dev->ttport << 24;
1075 if (!ehci_is_TDI(ehci)
1076 || (dev->tt->hub !=
1077 ehci_to_hcd(ehci)->self.root_hub))
1078 addr |= dev->tt->hub->devnum << 16;
1079 addr |= epnum << 8;
1080 addr |= dev->devnum;
1081 stream->ps.usecs = HS_USECS_ISO(maxp);
1082 think_time = dev->tt->think_time;
1083 stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
1084 dev->speed, is_input, 1, maxp));
1085 hs_transfers = max(1u, (maxp + 187) / 188);
1086 if (is_input) {
1087 u32 tmp;
1088
1089 addr |= 1 << 31;
1090 stream->ps.c_usecs = stream->ps.usecs;
1091 stream->ps.usecs = HS_USECS_ISO(1);
1092 stream->ps.cs_mask = 1;
1093
1094 /* c-mask as specified in USB 2.0 11.18.4 3.c */
1095 tmp = (1 << (hs_transfers + 2)) - 1;
1096 stream->ps.cs_mask |= tmp << (8 + 2);
1097 } else
1098 stream->ps.cs_mask = smask_out[hs_transfers - 1];
1099
1100 /* period for bandwidth allocation */
1101 tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
1102 1 << (urb->ep->desc.bInterval - 1));
1103
1104 /* Allow urb->interval to override */
1105 stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
1106 stream->ps.bw_uperiod = stream->ps.bw_period << 3;
1107
1108 stream->ps.period = urb->interval;
1109 stream->uperiod = urb->interval << 3;
1110 stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
1111 stream->ps.bw_period;
1112
1113 /* stream->splits gets created from cs_mask later */
1114 stream->address = cpu_to_hc32(ehci, addr);
1115 }
1116
1117 stream->ps.udev = dev;
1118 stream->ps.ep = urb->ep;
1119
1120 stream->bEndpointAddress = is_input | epnum;
1121 stream->maxp = maxp;
1122}
1123
1124static struct ehci_iso_stream *
1125iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
1126{
1127 unsigned epnum;
1128 struct ehci_iso_stream *stream;
1129 struct usb_host_endpoint *ep;
1130 unsigned long flags;
1131
1132 epnum = usb_pipeendpoint (urb->pipe);
1133 if (usb_pipein(urb->pipe))
1134 ep = urb->dev->ep_in[epnum];
1135 else
1136 ep = urb->dev->ep_out[epnum];
1137
1138 spin_lock_irqsave(&ehci->lock, flags);
1139 stream = ep->hcpriv;
1140
1141 if (unlikely(stream == NULL)) {
1142 stream = iso_stream_alloc(GFP_ATOMIC);
1143 if (likely(stream != NULL)) {
1144 ep->hcpriv = stream;
1145 iso_stream_init(ehci, stream, urb);
1146 }
1147
1148 /* if dev->ep [epnum] is a QH, hw is set */
1149 } else if (unlikely(stream->hw != NULL)) {
1150 ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
1151 urb->dev->devpath, epnum,
1152 usb_pipein(urb->pipe) ? "in" : "out");
1153 stream = NULL;
1154 }
1155
1156 spin_unlock_irqrestore(&ehci->lock, flags);
1157 return stream;
1158}
1159
1160/*-------------------------------------------------------------------------*/
1161
1162/* ehci_iso_sched ops can be ITD-only or SITD-only */
1163
1164static struct ehci_iso_sched *
1165iso_sched_alloc(unsigned packets, gfp_t mem_flags)
1166{
1167 struct ehci_iso_sched *iso_sched;
1168
1169 iso_sched = kzalloc(struct_size(iso_sched, packet, packets), mem_flags);
1170 if (likely(iso_sched != NULL))
1171 INIT_LIST_HEAD(&iso_sched->td_list);
1172
1173 return iso_sched;
1174}
1175
1176static inline void
1177itd_sched_init(
1178 struct ehci_hcd *ehci,
1179 struct ehci_iso_sched *iso_sched,
1180 struct ehci_iso_stream *stream,
1181 struct urb *urb
1182)
1183{
1184 unsigned i;
1185 dma_addr_t dma = urb->transfer_dma;
1186
1187 /* how many uframes are needed for these transfers */
1188 iso_sched->span = urb->number_of_packets * stream->uperiod;
1189
1190 /* figure out per-uframe itd fields that we'll need later
1191 * when we fit new itds into the schedule.
1192 */
1193 for (i = 0; i < urb->number_of_packets; i++) {
1194 struct ehci_iso_packet *uframe = &iso_sched->packet[i];
1195 unsigned length;
1196 dma_addr_t buf;
1197 u32 trans;
1198
1199 length = urb->iso_frame_desc[i].length;
1200 buf = dma + urb->iso_frame_desc[i].offset;
1201
1202 trans = EHCI_ISOC_ACTIVE;
1203 trans |= buf & 0x0fff;
1204 if (unlikely(((i + 1) == urb->number_of_packets))
1205 && !(urb->transfer_flags & URB_NO_INTERRUPT))
1206 trans |= EHCI_ITD_IOC;
1207 trans |= length << 16;
1208 uframe->transaction = cpu_to_hc32(ehci, trans);
1209
1210 /* might need to cross a buffer page within a uframe */
1211 uframe->bufp = (buf & ~(u64)0x0fff);
1212 buf += length;
1213 if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
1214 uframe->cross = 1;
1215 }
1216}
1217
1218static void
1219iso_sched_free(
1220 struct ehci_iso_stream *stream,
1221 struct ehci_iso_sched *iso_sched
1222)
1223{
1224 if (!iso_sched)
1225 return;
1226 /* caller must hold ehci->lock! */
1227 list_splice(&iso_sched->td_list, &stream->free_list);
1228 kfree(iso_sched);
1229}
1230
1231static int
1232itd_urb_transaction(
1233 struct ehci_iso_stream *stream,
1234 struct ehci_hcd *ehci,
1235 struct urb *urb,
1236 gfp_t mem_flags
1237)
1238{
1239 struct ehci_itd *itd;
1240 dma_addr_t itd_dma;
1241 int i;
1242 unsigned num_itds;
1243 struct ehci_iso_sched *sched;
1244 unsigned long flags;
1245
1246 sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
1247 if (unlikely(sched == NULL))
1248 return -ENOMEM;
1249
1250 itd_sched_init(ehci, sched, stream, urb);
1251
1252 if (urb->interval < 8)
1253 num_itds = 1 + (sched->span + 7) / 8;
1254 else
1255 num_itds = urb->number_of_packets;
1256
1257 /* allocate/init ITDs */
1258 spin_lock_irqsave(&ehci->lock, flags);
1259 for (i = 0; i < num_itds; i++) {
1260
1261 /*
1262 * Use iTDs from the free list, but not iTDs that may
1263 * still be in use by the hardware.
1264 */
1265 if (likely(!list_empty(&stream->free_list))) {
1266 itd = list_first_entry(&stream->free_list,
1267 struct ehci_itd, itd_list);
1268 if (itd->frame == ehci->now_frame)
1269 goto alloc_itd;
1270 list_del(&itd->itd_list);
1271 itd_dma = itd->itd_dma;
1272 } else {
1273 alloc_itd:
1274 spin_unlock_irqrestore(&ehci->lock, flags);
1275 itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
1276 &itd_dma);
1277 spin_lock_irqsave(&ehci->lock, flags);
1278 if (!itd) {
1279 iso_sched_free(stream, sched);
1280 spin_unlock_irqrestore(&ehci->lock, flags);
1281 return -ENOMEM;
1282 }
1283 }
1284
1285 memset(itd, 0, sizeof(*itd));
1286 itd->itd_dma = itd_dma;
1287 itd->frame = NO_FRAME;
1288 list_add(&itd->itd_list, &sched->td_list);
1289 }
1290 spin_unlock_irqrestore(&ehci->lock, flags);
1291
1292 /* temporarily store schedule info in hcpriv */
1293 urb->hcpriv = sched;
1294 urb->error_count = 0;
1295 return 0;
1296}
1297
1298/*-------------------------------------------------------------------------*/
1299
1300static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
1301 struct ehci_iso_stream *stream, int sign)
1302{
1303 unsigned uframe;
1304 unsigned i, j;
1305 unsigned s_mask, c_mask, m;
1306 int usecs = stream->ps.usecs;
1307 int c_usecs = stream->ps.c_usecs;
1308 int tt_usecs = stream->ps.tt_usecs;
1309 struct ehci_tt *tt;
1310
1311 if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */
1312 return;
1313 uframe = stream->ps.bw_phase << 3;
1314
1315 bandwidth_dbg(ehci, sign, "iso", &stream->ps);
1316
1317 if (sign < 0) { /* Release bandwidth */
1318 usecs = -usecs;
1319 c_usecs = -c_usecs;
1320 tt_usecs = -tt_usecs;
1321 }
1322
1323 if (!stream->splits) { /* High speed */
1324 for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
1325 i += stream->ps.bw_uperiod)
1326 ehci->bandwidth[i] += usecs;
1327
1328 } else { /* Full speed */
1329 s_mask = stream->ps.cs_mask;
1330 c_mask = s_mask >> 8;
1331
1332 /* NOTE: adjustment needed for frame overflow */
1333 for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
1334 i += stream->ps.bw_uperiod) {
1335 for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
1336 (++j, m <<= 1)) {
1337 if (s_mask & m)
1338 ehci->bandwidth[i+j] += usecs;
1339 else if (c_mask & m)
1340 ehci->bandwidth[i+j] += c_usecs;
1341 }
1342 }
1343
1344 /*
1345 * find_tt() will not return any error here as we have
1346 * already called find_tt() before calling this function
1347 * and checked for any error return. The previous call
1348 * would have created the data structure.
1349 */
1350 tt = find_tt(stream->ps.udev);
1351 if (sign > 0)
1352 list_add_tail(&stream->ps.ps_list, &tt->ps_list);
1353 else
1354 list_del(&stream->ps.ps_list);
1355
1356 for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
1357 i += stream->ps.bw_period)
1358 tt->bandwidth[i] += tt_usecs;
1359 }
1360}
1361
1362static inline int
1363itd_slot_ok(
1364 struct ehci_hcd *ehci,
1365 struct ehci_iso_stream *stream,
1366 unsigned uframe
1367)
1368{
1369 unsigned usecs;
1370
1371 /* convert "usecs we need" to "max already claimed" */
1372 usecs = ehci->uframe_periodic_max - stream->ps.usecs;
1373
1374 for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
1375 uframe += stream->ps.bw_uperiod) {
1376 if (ehci->bandwidth[uframe] > usecs)
1377 return 0;
1378 }
1379 return 1;
1380}

static inline int
sitd_slot_ok(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	unsigned uframe,
	struct ehci_iso_sched *sched,
	struct ehci_tt *tt
)
{
	unsigned mask, tmp;
	unsigned frame, uf;

	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, stream->ps.bw_period,
			stream->ps.udev, frame, mask))
		return 0;
#endif

	do {
		unsigned max_used;
		unsigned i;

		/* check starts (OUT uses more than one) */
		uf = uframe;
		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (ehci->bandwidth[uf] > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->ps.c_usecs) {
			max_used = ehci->uframe_periodic_max -
					stream->ps.c_usecs;
			uf = uframe & ~7;
			tmp = 1 << (2+8);
			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
				if ((stream->ps.cs_mask & tmp) == 0)
					continue;
				if (ehci->bandwidth[uf+i] > max_used)
					return 0;
			}
		}

		uframe += stream->ps.bw_uperiod;
	} while (uframe < EHCI_BANDWIDTH_SIZE);

	stream->ps.cs_mask <<= uframe & 7;
	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
	return 1;
}
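
/*
 * For reference, cs_mask packs both split phases into one word: bits
 * 0-7 mark SSPLIT uframes and bits 8-15 mark CSPLIT uframes within a
 * frame. The two early-return checks above reject a candidate uframe
 * whenever the shifted mask would land an OUT start-split in uframe 7
 * or push an IN complete-split past uframe 7 into the next frame.
 */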

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.) That limits the size
 * of transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler!
 */
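
/*
 * Rough arithmetic behind those limits (assuming EHCI_TUNE_FLS selects
 * the smallest, 256-element frame list): the periodic schedule then
 * spans 256 frames = 256 msec, so a 64 msec URB leaves ample margin,
 * and keeping the total queue under about 230 msec stays clear of the
 * wraparound point even after scheduling slop.
 */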

static int
iso_stream_schedule(
	struct ehci_hcd *ehci,
	struct urb *urb,
	struct ehci_iso_stream *stream
)
{
	u32 now, base, next, start, period, span, now2;
	u32 wrap = 0, skip = 0;
	int status = 0;
	unsigned mod = ehci->periodic_size << 3;
	struct ehci_iso_sched *sched = urb->hcpriv;
	bool empty = list_empty(&stream->td_list);
	bool new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int done = 0;
			struct ehci_tt *tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p\n", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base. There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc). If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

 do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));

 use_start:
	/* Tried to schedule too far into the future? */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

 fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}
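
/*
 * The do_ASAP rounding keeps the stream's phase. For example
 * (illustrative numbers): with period = 8 uframes, next = 20, and a
 * stale start = 13, next + ((start - next) & (period - 1)) yields
 * 20 + ((13 - 20) & 7) = 21, the first slot at or after "next" that
 * is congruent to the old start modulo the period.
 */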

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch(
	struct ehci_hcd *ehci,
	struct ehci_itd *itd,
	struct ehci_iso_sched *iso_sched,
	unsigned index,
	u16 uframe
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	unsigned pg = itd->pg;

	/* BUG_ON(pg == 6 && uf->cross); */

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64 bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
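
/*
 * Page-cross handling, in brief: when a packet's data straddles a 4 KB
 * boundary (uf->cross), the code above advances itd->pg and primes the
 * next buffer-page slot with the following page (bufp + 4096); the PG
 * bits merged into hw_transaction tell the controller which page slot
 * each uframe's transfer starts in.
 */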

static inline void
itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	union ehci_shadow *prev = &ehci->pshadow[frame];
	__hc32 *hw_p = &ehci->periodic[frame];
	union ehci_shadow here = *prev;
	__hc32 type = 0;

	/* skip any iso nodes which might belong to previous microframes */
	while (here.ptr) {
		type = Q_NEXT_TYPE(ehci, *hw_p);
		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
			break;
		prev = periodic_next_shadow(ehci, prev, type);
		hw_p = shadow_next_periodic(ehci, &here, type);
		here = *prev;
	}

	itd->itd_next = here;
	itd->hw_next = *hw_p;
	prev->itd = itd;
	itd->frame = frame;
	wmb();
	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}

/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe, uframe, frame;
	struct ehci_iso_sched *iso_sched = urb->hcpriv;
	struct ehci_itd *itd;

	next_uframe = stream->next_uframe & (mod - 1);

	if (unlikely(list_empty(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = iso_sched->first_packet, itd = NULL;
			packet < urb->number_of_packets;) {
		if (itd == NULL) {
			/* ASSERT: we have all necessary itds */
			/* BUG_ON(list_empty(&iso_sched->td_list)); */

			/* ASSERT: no itds for this endpoint in this uframe */

			itd = list_entry(iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail(&itd->itd_list, &stream->td_list);
			itd->stream = stream;
			itd->urb = urb;
			itd_init(ehci, stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd_patch(ehci, itd, iso_sched, packet, uframe);

		next_uframe += stream->uperiod;
		next_uframe &= mod - 1;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
			itd = NULL;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free(stream, iso_sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}
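
/*
 * Packing example (assumed interval): with uperiod = 2 and a stream
 * starting at uframe 0, the loop above patches packets into uframes
 * 0, 2, 4 and 6 of one iTD, crosses into the next frame, links the
 * filled iTD, and starts a fresh one; with uperiod >= 8 each iTD
 * carries just a single packet.
 */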

#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process and recycle a completed ITD. Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly. That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs. It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{
	struct urb *urb = itd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	unsigned uframe;
	int urb_index = -1;
	struct ehci_iso_stream *stream = itd->stream;
	bool retval = false;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely(itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc[urb_index];

		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
		itd->hw_transaction[uframe] = 0;

		/* report transfer status */
		if (unlikely(t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein(urb->pipe)
					? -ENOSR	/* hc couldn't read */
					: -ECOMM;	/* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE)) {
				desc->actual_length = EHCI_ITD_LENGTH(t);
				urb->actual_length += desc->actual_length;
			}
		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH(t);
			urb->actual_length += desc->actual_length;
		} else {
			/* URB was too late */
			urb->error_count++;
		}
	}

	/* handle completion now? */
	if (likely((urb_index + 1) != urb->number_of_packets))
		goto done;

	/*
	 * ASSERT: it's really the last itd for this urb
	 * list_for_each_entry (itd, &stream->td_list, itd_list)
	 *	BUG_ON(itd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (unlikely(list_is_singular(&stream->td_list)))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	itd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&itd->itd_list, &stream->free_list);

	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_itd_list);
		start_free_itds(ehci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/

static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
		gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (unlikely(stream == NULL)) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely(urb->interval != stream->uperiod)) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
				stream->uperiod, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
	if (unlikely(status < 0)) {
		ehci_dbg(ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs. These need microframe scheduling.
 */

static inline void
sitd_sched_init(
	struct ehci_hcd *ehci,
	struct ehci_iso_sched *iso_sched,
	struct ehci_iso_stream *stream,
	struct urb *urb
)
{
	unsigned i;
	dma_addr_t dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->ps.period;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet *packet = &iso_sched->packet[i];
		unsigned length;
		dma_addr_t buf;
		u32 trans;

		length = urb->iso_frame_desc[i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = (length + 187) / 188;
		if (length > 1) /* BEGIN vs ALL */
			length |= 1 << 3;
		packet->buf1 |= length;
	}
}
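
/*
 * Start-split count example (illustrative length): a 400-byte OUT
 * packet needs (400 + 187) / 188 = 3 start-splits, since one SSPLIT
 * moves at most 188 bytes of full-speed data; because more than one
 * is needed, the transaction-position field is set to BEGIN (the
 * 1 << 3 above) instead of ALL.
 */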

static int
sitd_urb_transaction(
	struct ehci_iso_stream *stream,
	struct ehci_hcd *ehci,
	struct urb *urb,
	gfp_t mem_flags
)
{
	struct ehci_sitd *sitd;
	dma_addr_t sitd_dma;
	int i;
	struct ehci_iso_sched *iso_sched;
	unsigned long flags;

	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (iso_sched == NULL)
		return -ENOMEM;

	sitd_sched_init(ehci, iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE: for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/*
		 * Use siTDs from the free list, but not siTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			sitd = list_first_entry(&stream->free_list,
					struct ehci_sitd, sitd_list);
			if (sitd->frame == ehci->now_frame)
				goto alloc_sitd;
			list_del(&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else {
 alloc_sitd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!sitd) {
				iso_sched_free(stream, iso_sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(sitd, 0, sizeof(*sitd));
		sitd->sitd_dma = sitd_dma;
		sitd->frame = NO_FRAME;
		list_add(&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore(&ehci->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd *ehci,
	struct ehci_iso_stream *stream,
	struct ehci_sitd *sitd,
	struct ehci_iso_sched *iso_sched,
	unsigned index
)
{
	struct ehci_iso_packet *uf = &iso_sched->packet[index];
	u64 bufp;

	sitd->hw_next = EHCI_LIST_END(ehci);
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END(ehci);

	bufp = uf->bufp;
	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);

	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
	sitd->index = index;
}

static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow[frame];
	sitd->hw_next = ehci->periodic[frame];
	ehci->pshadow[frame].sitd = sitd;
	sitd->frame = frame;
	wmb();
	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}

/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd *ehci,
	struct urb *urb,
	unsigned mod,
	struct ehci_iso_stream *stream
)
{
	int packet;
	unsigned next_uframe;
	struct ehci_iso_sched *sched = urb->hcpriv;
	struct ehci_sitd *sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list))
		/* usbfs ignores TT bandwidth */
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				+= stream->bandwidth;

	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_disable();
	}

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = sched->first_packet, sitd = NULL;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT: we have all necessary sitds */
		BUG_ON(list_empty(&sched->td_list));

		/* ASSERT: no itds for this endpoint in this frame */

		sitd = list_entry(sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail(&sitd->sitd_list, &stream->td_list);
		sitd->stream = stream;
		sitd->urb = urb;

		sitd_patch(ehci, stream, sitd, sched, packet);
		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
				sitd);

		next_uframe += stream->uperiod;
	}
	stream->next_uframe = next_uframe & (mod - 1);

	/* don't need that schedule data any more */
	iso_sched_free(stream, sched);
	urb->hcpriv = stream;

	++ehci->isoc_count;
	enable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
		| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD. Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly. That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs. It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
	struct urb *urb = sitd->urb;
	struct usb_iso_packet_descriptor *desc;
	u32 t;
	int urb_index;
	struct ehci_iso_stream *stream = sitd->stream;
	bool retval = false;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc[urb_index];
	t = hc32_to_cpup(ehci, &sitd->hw_results);

	/* report transfer status */
	if (unlikely(t & SITD_ERRS)) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein(urb->pipe)
				? -ENOSR	/* hc couldn't read */
				: -ECOMM;	/* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else if (unlikely(t & SITD_STS_ACTIVE)) {
		/* URB was too late */
		urb->error_count++;
	} else {
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH(t);
		urb->actual_length += desc->actual_length;
	}

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		goto done;

	/*
	 * ASSERT: it's really the last sitd for this urb
	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
	 *	BUG_ON(sitd->urb == urb);
	 */

	/* give urb back to the driver; completion often (re)submits */
	ehci_urb_done(ehci, urb, 0);
	retval = true;
	urb = NULL;

	--ehci->isoc_count;
	disable_periodic(ehci);

	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
		if (ehci->amd_pll_fix == 1)
			usb_amd_quirk_pll_enable();
	}

	if (list_is_singular(&stream->td_list))
		ehci_to_hcd(ehci)->self.bandwidth_allocated
				-= stream->bandwidth;

done:
	sitd->urb = NULL;

	/* Add to the end of the free list for later reuse */
	list_move_tail(&sitd->sitd_list, &stream->free_list);

	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
	if (list_empty(&stream->td_list)) {
		list_splice_tail_init(&stream->free_list,
				&ehci->cached_sitd_list);
		start_free_itds(ehci);
	}

	return retval;
}


static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
		gfp_t mem_flags)
{
	int status = -EINVAL;
	unsigned long flags;
	struct ehci_iso_stream *stream;

	/* Get iso_stream head */
	stream = iso_stream_find(ehci, urb);
	if (stream == NULL) {
		ehci_dbg(ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->ps.period) {
		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
				stream->ps.period, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg(ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg(ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave(&ehci->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;
	status = iso_stream_schedule(ehci, urb, stream);
	if (likely(status == 0)) {
		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
	} else if (status > 0) {
		status = 0;
		ehci_urb_done(ehci, urb, 0);
	} else {
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	}
 done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
 done:
	return status;
}

/*-------------------------------------------------------------------------*/

static void scan_isoc(struct ehci_hcd *ehci)
{
	unsigned uf, now_frame, frame;
	unsigned fmask = ehci->periodic_size - 1;
	bool modified, live;
	union ehci_shadow q, *q_p;
	__hc32 type, *hw_p;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible: cache-friendly.
	 */
	if (ehci->rh_state >= EHCI_RH_RUNNING) {
		uf = ehci_read_frame_index(ehci);
		now_frame = (uf >> 3) & fmask;
		live = true;
	} else {
		now_frame = (ehci->last_iso_frame - 1) & fmask;
		live = false;
	}
	ehci->now_frame = now_frame;

	frame = ehci->last_iso_frame;

restart:
	/* Scan each element in frame's queue for completions */
	q_p = &ehci->pshadow[frame];
	hw_p = &ehci->periodic[frame];
	q.ptr = q_p->ptr;
	type = Q_NEXT_TYPE(ehci, *hw_p);
	modified = false;

	while (q.ptr != NULL) {
		switch (hc32_to_cpu(ehci, type)) {
		case Q_TYPE_ITD:
			/*
			 * If this ITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (frame == now_frame && live) {
				rmb();
				for (uf = 0; uf < 8; uf++) {
					if (q.itd->hw_transaction[uf] &
							ITD_ACTIVE(ehci))
						break;
				}
				if (uf < 8) {
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE(ehci,
							q.itd->hw_next);
					q = *q_p;
					break;
				}
			}

			/*
			 * Take finished ITDs out of the schedule
			 * and process them: recycle, maybe report
			 * URB completion. HC won't cache the
			 * pointer for much longer, if at all.
			 */
			*q_p = q.itd->itd_next;
			if (!ehci->use_dummy_qh ||
					q.itd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.itd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
			wmb();
			modified = itd_complete(ehci, q.itd);
			q = *q_p;
			break;
		case Q_TYPE_SITD:
			/*
			 * If this SITD is still active, leave it for
			 * later processing ... check the next entry.
			 * No need to check for activity unless the
			 * frame is current.
			 */
			if (((frame == now_frame) ||
					(((frame + 1) & fmask) == now_frame))
				&& live
				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {

				q_p = &q.sitd->sitd_next;
				hw_p = &q.sitd->hw_next;
				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
				q = *q_p;
				break;
			}

			/*
			 * Take finished SITDs out of the schedule
			 * and process them: recycle, maybe report
			 * URB completion.
			 */
			*q_p = q.sitd->sitd_next;
			if (!ehci->use_dummy_qh ||
					q.sitd->hw_next != EHCI_LIST_END(ehci))
				*hw_p = q.sitd->hw_next;
			else
				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
			wmb();
			modified = sitd_complete(ehci, q.sitd);
			q = *q_p;
			break;
		default:
			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
			/* BUG(); */
			fallthrough;
		case Q_TYPE_QH:
		case Q_TYPE_FSTN:
			/* End of the iTDs and siTDs */
			q.ptr = NULL;
			break;
		}

		/* Assume completion callbacks modify the queue */
		if (unlikely(modified && ehci->isoc_count > 0))
			goto restart;
	}

	/* Stop when we have reached the current frame */
	if (frame == now_frame)
		return;

	/* The last frame may still have active siTDs */
	ehci->last_iso_frame = frame;
	frame = (frame + 1) & fmask;

	goto restart;
}