Loading...
1/*
2 * WUSB Wire Adapter
3 * Data transfer and URB enqueing
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * How transfers work: get a buffer, break it up in segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
27 *
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
32 *
33 * Sounds simple, it is a pain to implement.
34 *
35 *
36 * ENTRY POINTS
37 *
38 * FIXME
39 *
40 * LIFE CYCLE / STATE DIAGRAM
41 *
42 * FIXME
43 *
44 * THIS CODE IS DISGUSTING
45 *
46 * Warned you are; it's my second try and still not happy with it.
47 *
48 * NOTES:
49 *
50 * - No iso
51 *
52 * - Supports DMA xfers, control, bulk and maybe interrupt
53 *
54 * - Does not recycle unused rpipes
55 *
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60 * (should be a mutex).
61 *
62 * Two methods it could be done:
63 *
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when a xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()].
68 *
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds) take it
73 *
74 * However, due to the fact that we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end
77 * we are going to have to rebuild all this based on an scheduler,
78 * to where we have a list of transactions to do and based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
81 */
82#include <linux/init.h>
83#include <linux/spinlock.h>
84#include <linux/slab.h>
85#include <linux/hash.h>
86#include <linux/ratelimit.h>
87
88#include "wa-hc.h"
89#include "wusbhc.h"
90
91enum {
92 WA_SEGS_MAX = 255,
93};
94
95enum wa_seg_status {
96 WA_SEG_NOTREADY,
97 WA_SEG_READY,
98 WA_SEG_DELAYED,
99 WA_SEG_SUBMITTED,
100 WA_SEG_PENDING,
101 WA_SEG_DTI_PENDING,
102 WA_SEG_DONE,
103 WA_SEG_ERROR,
104 WA_SEG_ABORTED,
105};
106
107static void wa_xfer_delayed_run(struct wa_rpipe *);
108
109/*
110 * Life cycle governed by 'struct urb' (the refcount of the struct is
111 * that of the 'struct urb' and usb_free_urb() would free the whole
112 * struct).
113 */
114struct wa_seg {
115 struct urb urb;
116 struct urb *dto_urb; /* for data output? */
117 struct list_head list_node; /* for rpipe->req_list */
118 struct wa_xfer *xfer; /* out xfer */
119 u8 index; /* which segment we are */
120 enum wa_seg_status status;
121 ssize_t result; /* bytes xfered or error */
122 struct wa_xfer_hdr xfer_hdr;
123 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
124};
125
126static void wa_seg_init(struct wa_seg *seg)
127{
128 /* usb_init_urb() repeats a lot of work, so we do it here */
129 kref_init(&seg->urb.kref);
130}
131
132/*
133 * Protected by xfer->lock
134 *
135 */
136struct wa_xfer {
137 struct kref refcnt;
138 struct list_head list_node;
139 spinlock_t lock;
140 u32 id;
141
142 struct wahc *wa; /* Wire adapter we are plugged to */
143 struct usb_host_endpoint *ep;
144 struct urb *urb; /* URB we are transferring for */
145 struct wa_seg **seg; /* transfer segments */
146 u8 segs, segs_submitted, segs_done;
147 unsigned is_inbound:1;
148 unsigned is_dma:1;
149 size_t seg_size;
150 int result;
151
152 gfp_t gfp; /* allocation mask */
153
154 struct wusb_dev *wusb_dev; /* for activity timestamps */
155};
156
157static inline void wa_xfer_init(struct wa_xfer *xfer)
158{
159 kref_init(&xfer->refcnt);
160 INIT_LIST_HEAD(&xfer->list_node);
161 spin_lock_init(&xfer->lock);
162}
163
164/*
165 * Destroy a transfer structure
166 *
167 * Note that the xfer->seg[index] thingies follow the URB life cycle,
168 * so we need to put them, not free them.
169 */
170static void wa_xfer_destroy(struct kref *_xfer)
171{
172 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
173 if (xfer->seg) {
174 unsigned cnt;
175 for (cnt = 0; cnt < xfer->segs; cnt++) {
176 if (xfer->is_inbound)
177 usb_put_urb(xfer->seg[cnt]->dto_urb);
178 usb_put_urb(&xfer->seg[cnt]->urb);
179 }
180 }
181 kfree(xfer);
182}
183
184static void wa_xfer_get(struct wa_xfer *xfer)
185{
186 kref_get(&xfer->refcnt);
187}
188
189static void wa_xfer_put(struct wa_xfer *xfer)
190{
191 kref_put(&xfer->refcnt, wa_xfer_destroy);
192}
193
194/*
195 * xfer is referenced
196 *
197 * xfer->lock has to be unlocked
198 *
199 * We take xfer->lock for setting the result; this is a barrier
200 * against drivers/usb/core/hcd.c:unlink1() being called after we call
201 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
202 * reference to the transfer.
203 */
204static void wa_xfer_giveback(struct wa_xfer *xfer)
205{
206 unsigned long flags;
207
208 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
209 list_del_init(&xfer->list_node);
210 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
211 /* FIXME: segmentation broken -- kills DWA */
212 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
213 wa_put(xfer->wa);
214 wa_xfer_put(xfer);
215}
216
217/*
218 * xfer is referenced
219 *
220 * xfer->lock has to be unlocked
221 */
222static void wa_xfer_completion(struct wa_xfer *xfer)
223{
224 if (xfer->wusb_dev)
225 wusb_dev_put(xfer->wusb_dev);
226 rpipe_put(xfer->ep->hcpriv);
227 wa_xfer_giveback(xfer);
228}
229
230/*
231 * If transfer is done, wrap it up and return true
232 *
233 * xfer->lock has to be locked
234 */
235static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
236{
237 struct device *dev = &xfer->wa->usb_iface->dev;
238 unsigned result, cnt;
239 struct wa_seg *seg;
240 struct urb *urb = xfer->urb;
241 unsigned found_short = 0;
242
243 result = xfer->segs_done == xfer->segs_submitted;
244 if (result == 0)
245 goto out;
246 urb->actual_length = 0;
247 for (cnt = 0; cnt < xfer->segs; cnt++) {
248 seg = xfer->seg[cnt];
249 switch (seg->status) {
250 case WA_SEG_DONE:
251 if (found_short && seg->result > 0) {
252 dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
253 xfer, cnt, seg->result);
254 urb->status = -EINVAL;
255 goto out;
256 }
257 urb->actual_length += seg->result;
258 if (seg->result < xfer->seg_size
259 && cnt != xfer->segs-1)
260 found_short = 1;
261 dev_dbg(dev, "xfer %p#%u: DONE short %d "
262 "result %zu urb->actual_length %d\n",
263 xfer, seg->index, found_short, seg->result,
264 urb->actual_length);
265 break;
266 case WA_SEG_ERROR:
267 xfer->result = seg->result;
268 dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
269 xfer, seg->index, seg->result);
270 goto out;
271 case WA_SEG_ABORTED:
272 dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
273 xfer, seg->index, urb->status);
274 xfer->result = urb->status;
275 goto out;
276 default:
277 dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
278 xfer, cnt, seg->status);
279 xfer->result = -EINVAL;
280 goto out;
281 }
282 }
283 xfer->result = 0;
284out:
285 return result;
286}
287
288/*
289 * Initialize a transfer's ID
290 *
291 * We need to use a sequential number; if we use the pointer or the
292 * hash of the pointer, it can repeat over sequential transfers and
293 * then it will confuse the HWA....wonder why in hell they put a 32
294 * bit handle in there then.
295 */
296static void wa_xfer_id_init(struct wa_xfer *xfer)
297{
298 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
299}
300
301/*
302 * Return the xfer's ID associated with xfer
303 *
304 * Need to generate a
305 */
306static u32 wa_xfer_id(struct wa_xfer *xfer)
307{
308 return xfer->id;
309}
310
311/*
312 * Search for a transfer list ID on the HCD's URB list
313 *
314 * For 32 bit architectures, we use the pointer itself; for 64 bits, a
315 * 32-bit hash of the pointer.
316 *
317 * @returns NULL if not found.
318 */
319static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
320{
321 unsigned long flags;
322 struct wa_xfer *xfer_itr;
323 spin_lock_irqsave(&wa->xfer_list_lock, flags);
324 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
325 if (id == xfer_itr->id) {
326 wa_xfer_get(xfer_itr);
327 goto out;
328 }
329 }
330 xfer_itr = NULL;
331out:
332 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
333 return xfer_itr;
334}
335
336struct wa_xfer_abort_buffer {
337 struct urb urb;
338 struct wa_xfer_abort cmd;
339};
340
341static void __wa_xfer_abort_cb(struct urb *urb)
342{
343 struct wa_xfer_abort_buffer *b = urb->context;
344 usb_put_urb(&b->urb);
345}
346
347/*
348 * Aborts an ongoing transaction
349 *
350 * Assumes the transfer is referenced and locked and in a submitted
351 * state (mainly that there is an endpoint/rpipe assigned).
352 *
353 * The callback (see above) does nothing but freeing up the data by
354 * putting the URB. Because the URB is allocated at the head of the
355 * struct, the whole space we allocated is kfreed.
356 *
357 * We'll get an 'aborted transaction' xfer result on DTI, that'll
358 * politely ignore because at this point the transaction has been
359 * marked as aborted already.
360 */
361static void __wa_xfer_abort(struct wa_xfer *xfer)
362{
363 int result;
364 struct device *dev = &xfer->wa->usb_iface->dev;
365 struct wa_xfer_abort_buffer *b;
366 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
367
368 b = kmalloc(sizeof(*b), GFP_ATOMIC);
369 if (b == NULL)
370 goto error_kmalloc;
371 b->cmd.bLength = sizeof(b->cmd);
372 b->cmd.bRequestType = WA_XFER_ABORT;
373 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
374 b->cmd.dwTransferID = wa_xfer_id(xfer);
375
376 usb_init_urb(&b->urb);
377 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
378 usb_sndbulkpipe(xfer->wa->usb_dev,
379 xfer->wa->dto_epd->bEndpointAddress),
380 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
381 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
382 if (result < 0)
383 goto error_submit;
384 return; /* callback frees! */
385
386
387error_submit:
388 if (printk_ratelimit())
389 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
390 xfer, result);
391 kfree(b);
392error_kmalloc:
393 return;
394
395}
396
397/*
398 *
399 * @returns < 0 on error, transfer segment request size if ok
400 */
401static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
402 enum wa_xfer_type *pxfer_type)
403{
404 ssize_t result;
405 struct device *dev = &xfer->wa->usb_iface->dev;
406 size_t maxpktsize;
407 struct urb *urb = xfer->urb;
408 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
409
410 switch (rpipe->descr.bmAttribute & 0x3) {
411 case USB_ENDPOINT_XFER_CONTROL:
412 *pxfer_type = WA_XFER_TYPE_CTL;
413 result = sizeof(struct wa_xfer_ctl);
414 break;
415 case USB_ENDPOINT_XFER_INT:
416 case USB_ENDPOINT_XFER_BULK:
417 *pxfer_type = WA_XFER_TYPE_BI;
418 result = sizeof(struct wa_xfer_bi);
419 break;
420 case USB_ENDPOINT_XFER_ISOC:
421 dev_err(dev, "FIXME: ISOC not implemented\n");
422 result = -ENOSYS;
423 goto error;
424 default:
425 /* never happens */
426 BUG();
427 result = -EINVAL; /* shut gcc up */
428 };
429 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
430 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
431 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
432 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
433 /* Compute the segment size and make sure it is a multiple of
434 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
435 * a check (FIXME) */
436 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
437 if (xfer->seg_size < maxpktsize) {
438 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
439 "%zu\n", xfer->seg_size, maxpktsize);
440 result = -EINVAL;
441 goto error;
442 }
443 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
444 xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
445 / xfer->seg_size;
446 if (xfer->segs >= WA_SEGS_MAX) {
447 dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
448 (int)(urb->transfer_buffer_length / xfer->seg_size),
449 WA_SEGS_MAX);
450 result = -EINVAL;
451 goto error;
452 }
453 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
454 xfer->segs = 1;
455error:
456 return result;
457}
458
459/* Fill in the common request header and xfer-type specific data. */
460static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
461 struct wa_xfer_hdr *xfer_hdr0,
462 enum wa_xfer_type xfer_type,
463 size_t xfer_hdr_size)
464{
465 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
466
467 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
468 xfer_hdr0->bLength = xfer_hdr_size;
469 xfer_hdr0->bRequestType = xfer_type;
470 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
471 xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
472 xfer_hdr0->bTransferSegment = 0;
473 switch (xfer_type) {
474 case WA_XFER_TYPE_CTL: {
475 struct wa_xfer_ctl *xfer_ctl =
476 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
477 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
478 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
479 sizeof(xfer_ctl->baSetupData));
480 break;
481 }
482 case WA_XFER_TYPE_BI:
483 break;
484 case WA_XFER_TYPE_ISO:
485 printk(KERN_ERR "FIXME: ISOC not implemented\n");
486 default:
487 BUG();
488 };
489}
490
491/*
492 * Callback for the OUT data phase of the segment request
493 *
494 * Check wa_seg_cb(); most comments also apply here because this
495 * function does almost the same thing and they work closely
496 * together.
497 *
498 * If the seg request has failed but this DTO phase has succeeded,
499 * wa_seg_cb() has already failed the segment and moved the
500 * status to WA_SEG_ERROR, so this will go through 'case 0' and
501 * effectively do nothing.
502 */
503static void wa_seg_dto_cb(struct urb *urb)
504{
505 struct wa_seg *seg = urb->context;
506 struct wa_xfer *xfer = seg->xfer;
507 struct wahc *wa;
508 struct device *dev;
509 struct wa_rpipe *rpipe;
510 unsigned long flags;
511 unsigned rpipe_ready = 0;
512 u8 done = 0;
513
514 switch (urb->status) {
515 case 0:
516 spin_lock_irqsave(&xfer->lock, flags);
517 wa = xfer->wa;
518 dev = &wa->usb_iface->dev;
519 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
520 xfer, seg->index, urb->actual_length);
521 if (seg->status < WA_SEG_PENDING)
522 seg->status = WA_SEG_PENDING;
523 seg->result = urb->actual_length;
524 spin_unlock_irqrestore(&xfer->lock, flags);
525 break;
526 case -ECONNRESET: /* URB unlinked; no need to do anything */
527 case -ENOENT: /* as it was done by the who unlinked us */
528 break;
529 default: /* Other errors ... */
530 spin_lock_irqsave(&xfer->lock, flags);
531 wa = xfer->wa;
532 dev = &wa->usb_iface->dev;
533 rpipe = xfer->ep->hcpriv;
534 dev_dbg(dev, "xfer %p#%u: data out error %d\n",
535 xfer, seg->index, urb->status);
536 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
537 EDC_ERROR_TIMEFRAME)){
538 dev_err(dev, "DTO: URB max acceptable errors "
539 "exceeded, resetting device\n");
540 wa_reset_all(wa);
541 }
542 if (seg->status != WA_SEG_ERROR) {
543 seg->status = WA_SEG_ERROR;
544 seg->result = urb->status;
545 xfer->segs_done++;
546 __wa_xfer_abort(xfer);
547 rpipe_ready = rpipe_avail_inc(rpipe);
548 done = __wa_xfer_is_done(xfer);
549 }
550 spin_unlock_irqrestore(&xfer->lock, flags);
551 if (done)
552 wa_xfer_completion(xfer);
553 if (rpipe_ready)
554 wa_xfer_delayed_run(rpipe);
555 }
556}
557
558/*
559 * Callback for the segment request
560 *
561 * If successful transition state (unless already transitioned or
562 * outbound transfer); otherwise, take a note of the error, mark this
563 * segment done and try completion.
564 *
565 * Note we don't access until we are sure that the transfer hasn't
566 * been cancelled (ECONNRESET, ENOENT), which could mean that
567 * seg->xfer could be already gone.
568 *
569 * We have to check before setting the status to WA_SEG_PENDING
570 * because sometimes the xfer result callback arrives before this
571 * callback (geeeeeeze), so it might happen that we are already in
572 * another state. As well, we don't set it if the transfer is inbound,
573 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
574 * finishes.
575 */
576static void wa_seg_cb(struct urb *urb)
577{
578 struct wa_seg *seg = urb->context;
579 struct wa_xfer *xfer = seg->xfer;
580 struct wahc *wa;
581 struct device *dev;
582 struct wa_rpipe *rpipe;
583 unsigned long flags;
584 unsigned rpipe_ready;
585 u8 done = 0;
586
587 switch (urb->status) {
588 case 0:
589 spin_lock_irqsave(&xfer->lock, flags);
590 wa = xfer->wa;
591 dev = &wa->usb_iface->dev;
592 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
593 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
594 seg->status = WA_SEG_PENDING;
595 spin_unlock_irqrestore(&xfer->lock, flags);
596 break;
597 case -ECONNRESET: /* URB unlinked; no need to do anything */
598 case -ENOENT: /* as it was done by the who unlinked us */
599 break;
600 default: /* Other errors ... */
601 spin_lock_irqsave(&xfer->lock, flags);
602 wa = xfer->wa;
603 dev = &wa->usb_iface->dev;
604 rpipe = xfer->ep->hcpriv;
605 if (printk_ratelimit())
606 dev_err(dev, "xfer %p#%u: request error %d\n",
607 xfer, seg->index, urb->status);
608 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
609 EDC_ERROR_TIMEFRAME)){
610 dev_err(dev, "DTO: URB max acceptable errors "
611 "exceeded, resetting device\n");
612 wa_reset_all(wa);
613 }
614 usb_unlink_urb(seg->dto_urb);
615 seg->status = WA_SEG_ERROR;
616 seg->result = urb->status;
617 xfer->segs_done++;
618 __wa_xfer_abort(xfer);
619 rpipe_ready = rpipe_avail_inc(rpipe);
620 done = __wa_xfer_is_done(xfer);
621 spin_unlock_irqrestore(&xfer->lock, flags);
622 if (done)
623 wa_xfer_completion(xfer);
624 if (rpipe_ready)
625 wa_xfer_delayed_run(rpipe);
626 }
627}
628
629/*
630 * Allocate the segs array and initialize each of them
631 *
632 * The segments are freed by wa_xfer_destroy() when the xfer use count
633 * drops to zero; however, because each segment is given the same life
634 * cycle as the USB URB it contains, it is actually freed by
635 * usb_put_urb() on the contained USB URB (twisted, eh?).
636 */
637static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
638{
639 int result, cnt;
640 size_t alloc_size = sizeof(*xfer->seg[0])
641 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
642 struct usb_device *usb_dev = xfer->wa->usb_dev;
643 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
644 struct wa_seg *seg;
645 size_t buf_itr, buf_size, buf_itr_size;
646
647 result = -ENOMEM;
648 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
649 if (xfer->seg == NULL)
650 goto error_segs_kzalloc;
651 buf_itr = 0;
652 buf_size = xfer->urb->transfer_buffer_length;
653 for (cnt = 0; cnt < xfer->segs; cnt++) {
654 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
655 if (seg == NULL)
656 goto error_seg_kzalloc;
657 wa_seg_init(seg);
658 seg->xfer = xfer;
659 seg->index = cnt;
660 usb_fill_bulk_urb(&seg->urb, usb_dev,
661 usb_sndbulkpipe(usb_dev,
662 dto_epd->bEndpointAddress),
663 &seg->xfer_hdr, xfer_hdr_size,
664 wa_seg_cb, seg);
665 buf_itr_size = buf_size > xfer->seg_size ?
666 xfer->seg_size : buf_size;
667 if (xfer->is_inbound == 0 && buf_size > 0) {
668 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
669 if (seg->dto_urb == NULL)
670 goto error_dto_alloc;
671 usb_fill_bulk_urb(
672 seg->dto_urb, usb_dev,
673 usb_sndbulkpipe(usb_dev,
674 dto_epd->bEndpointAddress),
675 NULL, 0, wa_seg_dto_cb, seg);
676 if (xfer->is_dma) {
677 seg->dto_urb->transfer_dma =
678 xfer->urb->transfer_dma + buf_itr;
679 seg->dto_urb->transfer_flags |=
680 URB_NO_TRANSFER_DMA_MAP;
681 } else
682 seg->dto_urb->transfer_buffer =
683 xfer->urb->transfer_buffer + buf_itr;
684 seg->dto_urb->transfer_buffer_length = buf_itr_size;
685 }
686 seg->status = WA_SEG_READY;
687 buf_itr += buf_itr_size;
688 buf_size -= buf_itr_size;
689 }
690 return 0;
691
692error_dto_alloc:
693 kfree(xfer->seg[cnt]);
694 cnt--;
695error_seg_kzalloc:
696 /* use the fact that cnt is left at were it failed */
697 for (; cnt > 0; cnt--) {
698 if (xfer->is_inbound == 0)
699 kfree(xfer->seg[cnt]->dto_urb);
700 kfree(xfer->seg[cnt]);
701 }
702error_segs_kzalloc:
703 return result;
704}
705
706/*
707 * Allocates all the stuff needed to submit a transfer
708 *
709 * Breaks the whole data buffer in a list of segments, each one has a
710 * structure allocated to it and linked in xfer->seg[index]
711 *
712 * FIXME: merge setup_segs() and the last part of this function, no
713 * need to do two for loops when we could run everything in a
714 * single one
715 */
716static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
717{
718 int result;
719 struct device *dev = &xfer->wa->usb_iface->dev;
720 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
721 size_t xfer_hdr_size, cnt, transfer_size;
722 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
723
724 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
725 if (result < 0)
726 goto error_setup_sizes;
727 xfer_hdr_size = result;
728 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
729 if (result < 0) {
730 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
731 xfer, xfer->segs, result);
732 goto error_setup_segs;
733 }
734 /* Fill the first header */
735 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
736 wa_xfer_id_init(xfer);
737 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
738
739 /* Fill remainig headers */
740 xfer_hdr = xfer_hdr0;
741 transfer_size = urb->transfer_buffer_length;
742 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
743 xfer->seg_size : transfer_size;
744 transfer_size -= xfer->seg_size;
745 for (cnt = 1; cnt < xfer->segs; cnt++) {
746 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
747 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
748 xfer_hdr->bTransferSegment = cnt;
749 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
750 cpu_to_le32(xfer->seg_size)
751 : cpu_to_le32(transfer_size);
752 xfer->seg[cnt]->status = WA_SEG_READY;
753 transfer_size -= xfer->seg_size;
754 }
755 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
756 result = 0;
757error_setup_segs:
758error_setup_sizes:
759 return result;
760}
761
762/*
763 *
764 *
765 * rpipe->seg_lock is held!
766 */
767static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
768 struct wa_seg *seg)
769{
770 int result;
771 result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
772 if (result < 0) {
773 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
774 xfer, seg->index, result);
775 goto error_seg_submit;
776 }
777 if (seg->dto_urb) {
778 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
779 if (result < 0) {
780 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
781 xfer, seg->index, result);
782 goto error_dto_submit;
783 }
784 }
785 seg->status = WA_SEG_SUBMITTED;
786 rpipe_avail_dec(rpipe);
787 return 0;
788
789error_dto_submit:
790 usb_unlink_urb(&seg->urb);
791error_seg_submit:
792 seg->status = WA_SEG_ERROR;
793 seg->result = result;
794 return result;
795}
796
797/*
798 * Execute more queued request segments until the maximum concurrent allowed
799 *
800 * The ugly unlock/lock sequence on the error path is needed as the
801 * xfer->lock normally nests the seg_lock and not viceversa.
802 *
803 */
804static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
805{
806 int result;
807 struct device *dev = &rpipe->wa->usb_iface->dev;
808 struct wa_seg *seg;
809 struct wa_xfer *xfer;
810 unsigned long flags;
811
812 spin_lock_irqsave(&rpipe->seg_lock, flags);
813 while (atomic_read(&rpipe->segs_available) > 0
814 && !list_empty(&rpipe->seg_list)) {
815 seg = list_entry(rpipe->seg_list.next, struct wa_seg,
816 list_node);
817 list_del(&seg->list_node);
818 xfer = seg->xfer;
819 result = __wa_seg_submit(rpipe, xfer, seg);
820 dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
821 xfer, seg->index, atomic_read(&rpipe->segs_available), result);
822 if (unlikely(result < 0)) {
823 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
824 spin_lock_irqsave(&xfer->lock, flags);
825 __wa_xfer_abort(xfer);
826 xfer->segs_done++;
827 spin_unlock_irqrestore(&xfer->lock, flags);
828 spin_lock_irqsave(&rpipe->seg_lock, flags);
829 }
830 }
831 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
832}
833
834/*
835 *
836 * xfer->lock is taken
837 *
838 * On failure submitting we just stop submitting and return error;
839 * wa_urb_enqueue_b() will execute the completion path
840 */
841static int __wa_xfer_submit(struct wa_xfer *xfer)
842{
843 int result;
844 struct wahc *wa = xfer->wa;
845 struct device *dev = &wa->usb_iface->dev;
846 unsigned cnt;
847 struct wa_seg *seg;
848 unsigned long flags;
849 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
850 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
851 u8 available;
852 u8 empty;
853
854 spin_lock_irqsave(&wa->xfer_list_lock, flags);
855 list_add_tail(&xfer->list_node, &wa->xfer_list);
856 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
857
858 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
859 result = 0;
860 spin_lock_irqsave(&rpipe->seg_lock, flags);
861 for (cnt = 0; cnt < xfer->segs; cnt++) {
862 available = atomic_read(&rpipe->segs_available);
863 empty = list_empty(&rpipe->seg_list);
864 seg = xfer->seg[cnt];
865 dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
866 xfer, cnt, available, empty,
867 available == 0 || !empty ? "delayed" : "submitted");
868 if (available == 0 || !empty) {
869 dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
870 seg->status = WA_SEG_DELAYED;
871 list_add_tail(&seg->list_node, &rpipe->seg_list);
872 } else {
873 result = __wa_seg_submit(rpipe, xfer, seg);
874 if (result < 0) {
875 __wa_xfer_abort(xfer);
876 goto error_seg_submit;
877 }
878 }
879 xfer->segs_submitted++;
880 }
881error_seg_submit:
882 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
883 return result;
884}
885
886/*
887 * Second part of a URB/transfer enqueuement
888 *
889 * Assumes this comes from wa_urb_enqueue() [maybe through
890 * wa_urb_enqueue_run()]. At this point:
891 *
892 * xfer->wa filled and refcounted
893 * xfer->ep filled with rpipe refcounted if
894 * delayed == 0
895 * xfer->urb filled and refcounted (this is the case when called
896 * from wa_urb_enqueue() as we come from usb_submit_urb()
897 * and when called by wa_urb_enqueue_run(), as we took an
898 * extra ref dropped by _run() after we return).
899 * xfer->gfp filled
900 *
901 * If we fail at __wa_xfer_submit(), then we just check if we are done
902 * and if so, we run the completion procedure. However, if we are not
903 * yet done, we do nothing and wait for the completion handlers from
904 * the submitted URBs or from the xfer-result path to kick in. If xfer
905 * result never kicks in, the xfer will timeout from the USB code and
906 * dequeue() will be called.
907 */
908static void wa_urb_enqueue_b(struct wa_xfer *xfer)
909{
910 int result;
911 unsigned long flags;
912 struct urb *urb = xfer->urb;
913 struct wahc *wa = xfer->wa;
914 struct wusbhc *wusbhc = wa->wusb;
915 struct wusb_dev *wusb_dev;
916 unsigned done;
917
918 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
919 if (result < 0)
920 goto error_rpipe_get;
921 result = -ENODEV;
922 /* FIXME: segmentation broken -- kills DWA */
923 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
924 if (urb->dev == NULL) {
925 mutex_unlock(&wusbhc->mutex);
926 goto error_dev_gone;
927 }
928 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
929 if (wusb_dev == NULL) {
930 mutex_unlock(&wusbhc->mutex);
931 goto error_dev_gone;
932 }
933 mutex_unlock(&wusbhc->mutex);
934
935 spin_lock_irqsave(&xfer->lock, flags);
936 xfer->wusb_dev = wusb_dev;
937 result = urb->status;
938 if (urb->status != -EINPROGRESS)
939 goto error_dequeued;
940
941 result = __wa_xfer_setup(xfer, urb);
942 if (result < 0)
943 goto error_xfer_setup;
944 result = __wa_xfer_submit(xfer);
945 if (result < 0)
946 goto error_xfer_submit;
947 spin_unlock_irqrestore(&xfer->lock, flags);
948 return;
949
950 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback()
951 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
952 * upundo setup().
953 */
954error_xfer_setup:
955error_dequeued:
956 spin_unlock_irqrestore(&xfer->lock, flags);
957 /* FIXME: segmentation broken, kills DWA */
958 if (wusb_dev)
959 wusb_dev_put(wusb_dev);
960error_dev_gone:
961 rpipe_put(xfer->ep->hcpriv);
962error_rpipe_get:
963 xfer->result = result;
964 wa_xfer_giveback(xfer);
965 return;
966
967error_xfer_submit:
968 done = __wa_xfer_is_done(xfer);
969 xfer->result = result;
970 spin_unlock_irqrestore(&xfer->lock, flags);
971 if (done)
972 wa_xfer_completion(xfer);
973}
974
975/*
976 * Execute the delayed transfers in the Wire Adapter @wa
977 *
978 * We need to be careful here, as dequeue() could be called in the
979 * middle. That's why we do the whole thing under the
980 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
981 * and then checks the list -- so as we would be acquiring in inverse
982 * order, we just drop the lock once we have the xfer and reacquire it
983 * later.
984 */
985void wa_urb_enqueue_run(struct work_struct *ws)
986{
987 struct wahc *wa = container_of(ws, struct wahc, xfer_work);
988 struct wa_xfer *xfer, *next;
989 struct urb *urb;
990
991 spin_lock_irq(&wa->xfer_list_lock);
992 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
993 list_node) {
994 list_del_init(&xfer->list_node);
995 spin_unlock_irq(&wa->xfer_list_lock);
996
997 urb = xfer->urb;
998 wa_urb_enqueue_b(xfer);
999 usb_put_urb(urb); /* taken when queuing */
1000
1001 spin_lock_irq(&wa->xfer_list_lock);
1002 }
1003 spin_unlock_irq(&wa->xfer_list_lock);
1004}
1005EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1006
1007/*
1008 * Submit a transfer to the Wire Adapter in a delayed way
1009 *
1010 * The process of enqueuing involves possible sleeps() [see
1011 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1012 * in an atomic section, we defer the enqueue_b() call--else we call direct.
1013 *
1014 * @urb: We own a reference to it done by the HCI Linux USB stack that
1015 * will be given up by calling usb_hcd_giveback_urb() or by
1016 * returning error from this function -> ergo we don't have to
1017 * refcount it.
1018 */
1019int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1020 struct urb *urb, gfp_t gfp)
1021{
1022 int result;
1023 struct device *dev = &wa->usb_iface->dev;
1024 struct wa_xfer *xfer;
1025 unsigned long my_flags;
1026 unsigned cant_sleep = irqs_disabled() | in_atomic();
1027
1028 if (urb->transfer_buffer == NULL
1029 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1030 && urb->transfer_buffer_length != 0) {
1031 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1032 dump_stack();
1033 }
1034
1035 result = -ENOMEM;
1036 xfer = kzalloc(sizeof(*xfer), gfp);
1037 if (xfer == NULL)
1038 goto error_kmalloc;
1039
1040 result = -ENOENT;
1041 if (urb->status != -EINPROGRESS) /* cancelled */
1042 goto error_dequeued; /* before starting? */
1043 wa_xfer_init(xfer);
1044 xfer->wa = wa_get(wa);
1045 xfer->urb = urb;
1046 xfer->gfp = gfp;
1047 xfer->ep = ep;
1048 urb->hcpriv = xfer;
1049
1050 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1051 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1052 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1053 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1054 cant_sleep ? "deferred" : "inline");
1055
1056 if (cant_sleep) {
1057 usb_get_urb(urb);
1058 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1059 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1060 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1061 queue_work(wusbd, &wa->xfer_work);
1062 } else {
1063 wa_urb_enqueue_b(xfer);
1064 }
1065 return 0;
1066
1067error_dequeued:
1068 kfree(xfer);
1069error_kmalloc:
1070 return result;
1071}
1072EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1073
1074/*
1075 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
1076 * handler] is called.
1077 *
1078 * Until a transfer goes successfully through wa_urb_enqueue() it
1079 * needs to be dequeued with completion calling; when stuck in delayed
1080 * or before wa_xfer_setup() is called, we need to do completion.
1081 *
1082 * not setup If there is no hcpriv yet, that means that that enqueue
1083 * still had no time to set the xfer up. Because
1084 * urb->status should be other than -EINPROGRESS,
1085 * enqueue() will catch that and bail out.
1086 *
1087 * If the transfer has gone through setup, we just need to clean it
1088 * up. If it has gone through submit(), we have to abort it [with an
1089 * asynch request] and then make sure we cancel each segment.
1090 *
1091 */
1092int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1093{
1094 unsigned long flags, flags2;
1095 struct wa_xfer *xfer;
1096 struct wa_seg *seg;
1097 struct wa_rpipe *rpipe;
1098 unsigned cnt;
1099 unsigned rpipe_ready = 0;
1100
1101 xfer = urb->hcpriv;
1102 if (xfer == NULL) {
1103 /* NOthing setup yet enqueue will see urb->status !=
1104 * -EINPROGRESS (by hcd layer) and bail out with
1105 * error, no need to do completion
1106 */
1107 BUG_ON(urb->status == -EINPROGRESS);
1108 goto out;
1109 }
1110 spin_lock_irqsave(&xfer->lock, flags);
1111 rpipe = xfer->ep->hcpriv;
1112 /* Check the delayed list -> if there, release and complete */
1113 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1114 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1115 goto dequeue_delayed;
1116 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1117 if (xfer->seg == NULL) /* still hasn't reached */
1118 goto out_unlock; /* setup(), enqueue_b() completes */
1119 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1120 __wa_xfer_abort(xfer);
1121 for (cnt = 0; cnt < xfer->segs; cnt++) {
1122 seg = xfer->seg[cnt];
1123 switch (seg->status) {
1124 case WA_SEG_NOTREADY:
1125 case WA_SEG_READY:
1126 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1127 xfer, cnt, seg->status);
1128 WARN_ON(1);
1129 break;
1130 case WA_SEG_DELAYED:
1131 seg->status = WA_SEG_ABORTED;
1132 spin_lock_irqsave(&rpipe->seg_lock, flags2);
1133 list_del(&seg->list_node);
1134 xfer->segs_done++;
1135 rpipe_ready = rpipe_avail_inc(rpipe);
1136 spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1137 break;
1138 case WA_SEG_SUBMITTED:
1139 seg->status = WA_SEG_ABORTED;
1140 usb_unlink_urb(&seg->urb);
1141 if (xfer->is_inbound == 0)
1142 usb_unlink_urb(seg->dto_urb);
1143 xfer->segs_done++;
1144 rpipe_ready = rpipe_avail_inc(rpipe);
1145 break;
1146 case WA_SEG_PENDING:
1147 seg->status = WA_SEG_ABORTED;
1148 xfer->segs_done++;
1149 rpipe_ready = rpipe_avail_inc(rpipe);
1150 break;
1151 case WA_SEG_DTI_PENDING:
1152 usb_unlink_urb(wa->dti_urb);
1153 seg->status = WA_SEG_ABORTED;
1154 xfer->segs_done++;
1155 rpipe_ready = rpipe_avail_inc(rpipe);
1156 break;
1157 case WA_SEG_DONE:
1158 case WA_SEG_ERROR:
1159 case WA_SEG_ABORTED:
1160 break;
1161 }
1162 }
1163 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
1164 __wa_xfer_is_done(xfer);
1165 spin_unlock_irqrestore(&xfer->lock, flags);
1166 wa_xfer_completion(xfer);
1167 if (rpipe_ready)
1168 wa_xfer_delayed_run(rpipe);
1169 return 0;
1170
1171out_unlock:
1172 spin_unlock_irqrestore(&xfer->lock, flags);
1173out:
1174 return 0;
1175
1176dequeue_delayed:
1177 list_del_init(&xfer->list_node);
1178 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1179 xfer->result = urb->status;
1180 spin_unlock_irqrestore(&xfer->lock, flags);
1181 wa_xfer_giveback(xfer);
1182 usb_put_urb(urb); /* we got a ref in enqueue() */
1183 return 0;
1184}
1185EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1186
1187/*
1188 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1189 * codes
1190 *
1191 * Positive errno values are internal inconsistencies and should be
1192 * flagged louder. Negative are to be passed up to the user in the
1193 * normal way.
1194 *
1195 * @status: USB WA status code -- high two bits are stripped.
1196 */
1197static int wa_xfer_status_to_errno(u8 status)
1198{
1199 int errno;
1200 u8 real_status = status;
1201 static int xlat[] = {
1202 [WA_XFER_STATUS_SUCCESS] = 0,
1203 [WA_XFER_STATUS_HALTED] = -EPIPE,
1204 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1205 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1206 [WA_XFER_RESERVED] = EINVAL,
1207 [WA_XFER_STATUS_NOT_FOUND] = 0,
1208 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1209 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1210 [WA_XFER_STATUS_ABORTED] = -EINTR,
1211 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1212 [WA_XFER_INVALID_FORMAT] = EINVAL,
1213 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1214 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1215 };
1216 status &= 0x3f;
1217
1218 if (status == 0)
1219 return 0;
1220 if (status >= ARRAY_SIZE(xlat)) {
1221 printk_ratelimited(KERN_ERR "%s(): BUG? "
1222 "Unknown WA transfer status 0x%02x\n",
1223 __func__, real_status);
1224 return -EINVAL;
1225 }
1226 errno = xlat[status];
1227 if (unlikely(errno > 0)) {
1228 printk_ratelimited(KERN_ERR "%s(): BUG? "
1229 "Inconsistent WA status: 0x%02x\n",
1230 __func__, real_status);
1231 errno = -errno;
1232 }
1233 return errno;
1234}
1235
1236/*
1237 * Process a xfer result completion message
1238 *
1239 * inbound transfers: need to schedule a DTI read
1240 *
1241 * FIXME: this functio needs to be broken up in parts
1242 */
1243static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1244{
1245 int result;
1246 struct device *dev = &wa->usb_iface->dev;
1247 unsigned long flags;
1248 u8 seg_idx;
1249 struct wa_seg *seg;
1250 struct wa_rpipe *rpipe;
1251 struct wa_xfer_result *xfer_result = wa->xfer_result;
1252 u8 done = 0;
1253 u8 usb_status;
1254 unsigned rpipe_ready = 0;
1255
1256 spin_lock_irqsave(&xfer->lock, flags);
1257 seg_idx = xfer_result->bTransferSegment & 0x7f;
1258 if (unlikely(seg_idx >= xfer->segs))
1259 goto error_bad_seg;
1260 seg = xfer->seg[seg_idx];
1261 rpipe = xfer->ep->hcpriv;
1262 usb_status = xfer_result->bTransferStatus;
1263 dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1264 xfer, seg_idx, usb_status, seg->status);
1265 if (seg->status == WA_SEG_ABORTED
1266 || seg->status == WA_SEG_ERROR) /* already handled */
1267 goto segment_aborted;
1268 if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */
1269 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
1270 if (seg->status != WA_SEG_PENDING) {
1271 if (printk_ratelimit())
1272 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1273 xfer, seg_idx, seg->status);
1274 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
1275 }
1276 if (usb_status & 0x80) {
1277 seg->result = wa_xfer_status_to_errno(usb_status);
1278 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1279 xfer, seg->index, usb_status);
1280 goto error_complete;
1281 }
1282 /* FIXME: we ignore warnings, tally them for stats */
1283 if (usb_status & 0x40) /* Warning?... */
1284 usb_status = 0; /* ... pass */
1285 if (xfer->is_inbound) { /* IN data phase: read to buffer */
1286 seg->status = WA_SEG_DTI_PENDING;
1287 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1288 if (xfer->is_dma) {
1289 wa->buf_in_urb->transfer_dma =
1290 xfer->urb->transfer_dma
1291 + seg_idx * xfer->seg_size;
1292 wa->buf_in_urb->transfer_flags
1293 |= URB_NO_TRANSFER_DMA_MAP;
1294 } else {
1295 wa->buf_in_urb->transfer_buffer =
1296 xfer->urb->transfer_buffer
1297 + seg_idx * xfer->seg_size;
1298 wa->buf_in_urb->transfer_flags
1299 &= ~URB_NO_TRANSFER_DMA_MAP;
1300 }
1301 wa->buf_in_urb->transfer_buffer_length =
1302 le32_to_cpu(xfer_result->dwTransferLength);
1303 wa->buf_in_urb->context = seg;
1304 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1305 if (result < 0)
1306 goto error_submit_buf_in;
1307 } else {
1308 /* OUT data phase, complete it -- */
1309 seg->status = WA_SEG_DONE;
1310 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1311 xfer->segs_done++;
1312 rpipe_ready = rpipe_avail_inc(rpipe);
1313 done = __wa_xfer_is_done(xfer);
1314 }
1315 spin_unlock_irqrestore(&xfer->lock, flags);
1316 if (done)
1317 wa_xfer_completion(xfer);
1318 if (rpipe_ready)
1319 wa_xfer_delayed_run(rpipe);
1320 return;
1321
1322error_submit_buf_in:
1323 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1324 dev_err(dev, "DTI: URB max acceptable errors "
1325 "exceeded, resetting device\n");
1326 wa_reset_all(wa);
1327 }
1328 if (printk_ratelimit())
1329 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1330 xfer, seg_idx, result);
1331 seg->result = result;
1332error_complete:
1333 seg->status = WA_SEG_ERROR;
1334 xfer->segs_done++;
1335 rpipe_ready = rpipe_avail_inc(rpipe);
1336 __wa_xfer_abort(xfer);
1337 done = __wa_xfer_is_done(xfer);
1338 spin_unlock_irqrestore(&xfer->lock, flags);
1339 if (done)
1340 wa_xfer_completion(xfer);
1341 if (rpipe_ready)
1342 wa_xfer_delayed_run(rpipe);
1343 return;
1344
1345error_bad_seg:
1346 spin_unlock_irqrestore(&xfer->lock, flags);
1347 wa_urb_dequeue(wa, xfer->urb);
1348 if (printk_ratelimit())
1349 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1350 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1351 dev_err(dev, "DTI: URB max acceptable errors "
1352 "exceeded, resetting device\n");
1353 wa_reset_all(wa);
1354 }
1355 return;
1356
1357segment_aborted:
1358 /* nothing to do, as the aborter did the completion */
1359 spin_unlock_irqrestore(&xfer->lock, flags);
1360}
1361
1362/*
1363 * Callback for the IN data phase
1364 *
1365 * If successful transition state; otherwise, take a note of the
1366 * error, mark this segment done and try completion.
1367 *
1368 * Note we don't access until we are sure that the transfer hasn't
1369 * been cancelled (ECONNRESET, ENOENT), which could mean that
1370 * seg->xfer could be already gone.
1371 */
1372static void wa_buf_in_cb(struct urb *urb)
1373{
1374 struct wa_seg *seg = urb->context;
1375 struct wa_xfer *xfer = seg->xfer;
1376 struct wahc *wa;
1377 struct device *dev;
1378 struct wa_rpipe *rpipe;
1379 unsigned rpipe_ready;
1380 unsigned long flags;
1381 u8 done = 0;
1382
1383 switch (urb->status) {
1384 case 0:
1385 spin_lock_irqsave(&xfer->lock, flags);
1386 wa = xfer->wa;
1387 dev = &wa->usb_iface->dev;
1388 rpipe = xfer->ep->hcpriv;
1389 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1390 xfer, seg->index, (size_t)urb->actual_length);
1391 seg->status = WA_SEG_DONE;
1392 seg->result = urb->actual_length;
1393 xfer->segs_done++;
1394 rpipe_ready = rpipe_avail_inc(rpipe);
1395 done = __wa_xfer_is_done(xfer);
1396 spin_unlock_irqrestore(&xfer->lock, flags);
1397 if (done)
1398 wa_xfer_completion(xfer);
1399 if (rpipe_ready)
1400 wa_xfer_delayed_run(rpipe);
1401 break;
1402 case -ECONNRESET: /* URB unlinked; no need to do anything */
1403 case -ENOENT: /* as it was done by the who unlinked us */
1404 break;
1405 default: /* Other errors ... */
1406 spin_lock_irqsave(&xfer->lock, flags);
1407 wa = xfer->wa;
1408 dev = &wa->usb_iface->dev;
1409 rpipe = xfer->ep->hcpriv;
1410 if (printk_ratelimit())
1411 dev_err(dev, "xfer %p#%u: data in error %d\n",
1412 xfer, seg->index, urb->status);
1413 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1414 EDC_ERROR_TIMEFRAME)){
1415 dev_err(dev, "DTO: URB max acceptable errors "
1416 "exceeded, resetting device\n");
1417 wa_reset_all(wa);
1418 }
1419 seg->status = WA_SEG_ERROR;
1420 seg->result = urb->status;
1421 xfer->segs_done++;
1422 rpipe_ready = rpipe_avail_inc(rpipe);
1423 __wa_xfer_abort(xfer);
1424 done = __wa_xfer_is_done(xfer);
1425 spin_unlock_irqrestore(&xfer->lock, flags);
1426 if (done)
1427 wa_xfer_completion(xfer);
1428 if (rpipe_ready)
1429 wa_xfer_delayed_run(rpipe);
1430 }
1431}
1432
1433/*
1434 * Handle an incoming transfer result buffer
1435 *
1436 * Given a transfer result buffer, it completes the transfer (possibly
1437 * scheduling and buffer in read) and then resubmits the DTI URB for a
1438 * new transfer result read.
1439 *
1440 *
1441 * The xfer_result DTI URB state machine
1442 *
1443 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1444 *
1445 * We start in OFF mode, the first xfer_result notification [through
1446 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1447 * read.
1448 *
1449 * We receive a buffer -- if it is not a xfer_result, we complain and
1450 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
1451 * request accounting. If it is an IN segment, we move to RBI and post
1452 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1453 * repost the DTI-URB and move to RXR state. if there was no IN
1454 * segment, it will repost the DTI-URB.
1455 *
1456 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
1457 * errors) in the URBs.
1458 */
1459static void wa_xfer_result_cb(struct urb *urb)
1460{
1461 int result;
1462 struct wahc *wa = urb->context;
1463 struct device *dev = &wa->usb_iface->dev;
1464 struct wa_xfer_result *xfer_result;
1465 u32 xfer_id;
1466 struct wa_xfer *xfer;
1467 u8 usb_status;
1468
1469 BUG_ON(wa->dti_urb != urb);
1470 switch (wa->dti_urb->status) {
1471 case 0:
1472 /* We have a xfer result buffer; check it */
1473 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1474 urb->actual_length, urb->transfer_buffer);
1475 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1476 dev_err(dev, "DTI Error: xfer result--bad size "
1477 "xfer result (%d bytes vs %zu needed)\n",
1478 urb->actual_length, sizeof(*xfer_result));
1479 break;
1480 }
1481 xfer_result = wa->xfer_result;
1482 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1483 dev_err(dev, "DTI Error: xfer result--"
1484 "bad header length %u\n",
1485 xfer_result->hdr.bLength);
1486 break;
1487 }
1488 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1489 dev_err(dev, "DTI Error: xfer result--"
1490 "bad header type 0x%02x\n",
1491 xfer_result->hdr.bNotifyType);
1492 break;
1493 }
1494 usb_status = xfer_result->bTransferStatus & 0x3f;
1495 if (usb_status == WA_XFER_STATUS_ABORTED
1496 || usb_status == WA_XFER_STATUS_NOT_FOUND)
1497 /* taken care of already */
1498 break;
1499 xfer_id = xfer_result->dwTransferID;
1500 xfer = wa_xfer_get_by_id(wa, xfer_id);
1501 if (xfer == NULL) {
1502 /* FIXME: transaction might have been cancelled */
1503 dev_err(dev, "DTI Error: xfer result--"
1504 "unknown xfer 0x%08x (status 0x%02x)\n",
1505 xfer_id, usb_status);
1506 break;
1507 }
1508 wa_xfer_result_chew(wa, xfer);
1509 wa_xfer_put(xfer);
1510 break;
1511 case -ENOENT: /* (we killed the URB)...so, no broadcast */
1512 case -ESHUTDOWN: /* going away! */
1513 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1514 goto out;
1515 default:
1516 /* Unknown error */
1517 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1518 EDC_ERROR_TIMEFRAME)) {
1519 dev_err(dev, "DTI: URB max acceptable errors "
1520 "exceeded, resetting device\n");
1521 wa_reset_all(wa);
1522 goto out;
1523 }
1524 if (printk_ratelimit())
1525 dev_err(dev, "DTI: URB error %d\n", urb->status);
1526 break;
1527 }
1528 /* Resubmit the DTI URB */
1529 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1530 if (result < 0) {
1531 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1532 "resetting\n", result);
1533 wa_reset_all(wa);
1534 }
1535out:
1536 return;
1537}
1538
1539/*
1540 * Transfer complete notification
1541 *
1542 * Called from the notif.c code. We get a notification on EP2 saying
1543 * that some endpoint has some transfer result data available. We are
1544 * about to read it.
1545 *
1546 * To speed up things, we always have a URB reading the DTI URB; we
1547 * don't really set it up and start it until the first xfer complete
1548 * notification arrives, which is what we do here.
1549 *
1550 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1551 * machine starts.
1552 *
1553 * So here we just initialize the DTI URB for reading transfer result
1554 * notifications and also the buffer-in URB, for reading buffers. Then
1555 * we just submit the DTI URB.
1556 *
1557 * @wa shall be referenced
1558 */
1559void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1560{
1561 int result;
1562 struct device *dev = &wa->usb_iface->dev;
1563 struct wa_notif_xfer *notif_xfer;
1564 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1565
1566 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1567 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1568
1569 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1570 /* FIXME: hardcoded limitation, adapt */
1571 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1572 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1573 goto error;
1574 }
1575 if (wa->dti_urb != NULL) /* DTI URB already started */
1576 goto out;
1577
1578 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1579 if (wa->dti_urb == NULL) {
1580 dev_err(dev, "Can't allocate DTI URB\n");
1581 goto error_dti_urb_alloc;
1582 }
1583 usb_fill_bulk_urb(
1584 wa->dti_urb, wa->usb_dev,
1585 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1586 wa->xfer_result, wa->xfer_result_size,
1587 wa_xfer_result_cb, wa);
1588
1589 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1590 if (wa->buf_in_urb == NULL) {
1591 dev_err(dev, "Can't allocate BUF-IN URB\n");
1592 goto error_buf_in_urb_alloc;
1593 }
1594 usb_fill_bulk_urb(
1595 wa->buf_in_urb, wa->usb_dev,
1596 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1597 NULL, 0, wa_buf_in_cb, wa);
1598 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1599 if (result < 0) {
1600 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1601 "resetting\n", result);
1602 goto error_dti_urb_submit;
1603 }
1604out:
1605 return;
1606
1607error_dti_urb_submit:
1608 usb_put_urb(wa->buf_in_urb);
1609error_buf_in_urb_alloc:
1610 usb_put_urb(wa->dti_urb);
1611 wa->dti_urb = NULL;
1612error_dti_urb_alloc:
1613error:
1614 wa_reset_all(wa);
1615}
1/*
2 * WUSB Wire Adapter
3 * Data transfer and URB enqueing
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * How transfers work: get a buffer, break it up in segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
27 *
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
32 *
33 * Sounds simple, it is a pain to implement.
34 *
35 *
36 * ENTRY POINTS
37 *
38 * FIXME
39 *
40 * LIFE CYCLE / STATE DIAGRAM
41 *
42 * FIXME
43 *
44 * THIS CODE IS DISGUSTING
45 *
46 * Warned you are; it's my second try and still not happy with it.
47 *
48 * NOTES:
49 *
50 * - No iso
51 *
52 * - Supports DMA xfers, control, bulk and maybe interrupt
53 *
54 * - Does not recycle unused rpipes
55 *
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60 * (should be a mutex).
61 *
62 * Two methods it could be done:
63 *
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when a xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()].
68 *
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds) take it
73 *
74 * However, due to the fact that we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end
77 * we are going to have to rebuild all this based on an scheduler,
78 * to where we have a list of transactions to do and based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
81 */
82#include <linux/spinlock.h>
83#include <linux/slab.h>
84#include <linux/hash.h>
85#include <linux/ratelimit.h>
86#include <linux/export.h>
87#include <linux/scatterlist.h>
88
89#include "wa-hc.h"
90#include "wusbhc.h"
91
92enum {
93 /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
94 WA_SEGS_MAX = 128,
95};
96
97enum wa_seg_status {
98 WA_SEG_NOTREADY,
99 WA_SEG_READY,
100 WA_SEG_DELAYED,
101 WA_SEG_SUBMITTED,
102 WA_SEG_PENDING,
103 WA_SEG_DTI_PENDING,
104 WA_SEG_DONE,
105 WA_SEG_ERROR,
106 WA_SEG_ABORTED,
107};
108
109static void wa_xfer_delayed_run(struct wa_rpipe *);
110static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
111
112/*
113 * Life cycle governed by 'struct urb' (the refcount of the struct is
114 * that of the 'struct urb' and usb_free_urb() would free the whole
115 * struct).
116 */
117struct wa_seg {
118 struct urb tr_urb; /* transfer request urb. */
119 struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
120 struct urb *dto_urb; /* for data output. */
121 struct list_head list_node; /* for rpipe->req_list */
122 struct wa_xfer *xfer; /* out xfer */
123 u8 index; /* which segment we are */
124 int isoc_frame_count; /* number of isoc frames in this segment. */
125 int isoc_frame_offset; /* starting frame offset in the xfer URB. */
126 /* Isoc frame that the current transfer buffer corresponds to. */
127 int isoc_frame_index;
128 int isoc_size; /* size of all isoc frames sent by this seg. */
129 enum wa_seg_status status;
130 ssize_t result; /* bytes xfered or error */
131 struct wa_xfer_hdr xfer_hdr;
132};
133
134static inline void wa_seg_init(struct wa_seg *seg)
135{
136 usb_init_urb(&seg->tr_urb);
137
138 /* set the remaining memory to 0. */
139 memset(((void *)seg) + sizeof(seg->tr_urb), 0,
140 sizeof(*seg) - sizeof(seg->tr_urb));
141}
142
143/*
144 * Protected by xfer->lock
145 *
146 */
147struct wa_xfer {
148 struct kref refcnt;
149 struct list_head list_node;
150 spinlock_t lock;
151 u32 id;
152
153 struct wahc *wa; /* Wire adapter we are plugged to */
154 struct usb_host_endpoint *ep;
155 struct urb *urb; /* URB we are transferring for */
156 struct wa_seg **seg; /* transfer segments */
157 u8 segs, segs_submitted, segs_done;
158 unsigned is_inbound:1;
159 unsigned is_dma:1;
160 size_t seg_size;
161 int result;
162
163 gfp_t gfp; /* allocation mask */
164
165 struct wusb_dev *wusb_dev; /* for activity timestamps */
166};
167
168static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
169 struct wa_seg *seg, int curr_iso_frame);
170static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
171 int starting_index, enum wa_seg_status status);
172
173static inline void wa_xfer_init(struct wa_xfer *xfer)
174{
175 kref_init(&xfer->refcnt);
176 INIT_LIST_HEAD(&xfer->list_node);
177 spin_lock_init(&xfer->lock);
178}
179
180/*
181 * Destroy a transfer structure
182 *
183 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
184 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
185 */
186static void wa_xfer_destroy(struct kref *_xfer)
187{
188 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
189 if (xfer->seg) {
190 unsigned cnt;
191 for (cnt = 0; cnt < xfer->segs; cnt++) {
192 struct wa_seg *seg = xfer->seg[cnt];
193 if (seg) {
194 usb_free_urb(seg->isoc_pack_desc_urb);
195 if (seg->dto_urb) {
196 kfree(seg->dto_urb->sg);
197 usb_free_urb(seg->dto_urb);
198 }
199 usb_free_urb(&seg->tr_urb);
200 }
201 }
202 kfree(xfer->seg);
203 }
204 kfree(xfer);
205}
206
207static void wa_xfer_get(struct wa_xfer *xfer)
208{
209 kref_get(&xfer->refcnt);
210}
211
212static void wa_xfer_put(struct wa_xfer *xfer)
213{
214 kref_put(&xfer->refcnt, wa_xfer_destroy);
215}
216
217/*
218 * Try to get exclusive access to the DTO endpoint resource. Return true
219 * if successful.
220 */
221static inline int __wa_dto_try_get(struct wahc *wa)
222{
223 return (test_and_set_bit(0, &wa->dto_in_use) == 0);
224}
225
226/* Release the DTO endpoint resource. */
227static inline void __wa_dto_put(struct wahc *wa)
228{
229 clear_bit_unlock(0, &wa->dto_in_use);
230}
231
232/* Service RPIPEs that are waiting on the DTO resource. */
233static void wa_check_for_delayed_rpipes(struct wahc *wa)
234{
235 unsigned long flags;
236 int dto_waiting = 0;
237 struct wa_rpipe *rpipe;
238
239 spin_lock_irqsave(&wa->rpipe_lock, flags);
240 while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
241 rpipe = list_first_entry(&wa->rpipe_delayed_list,
242 struct wa_rpipe, list_node);
243 __wa_xfer_delayed_run(rpipe, &dto_waiting);
244 /* remove this RPIPE from the list if it is not waiting. */
245 if (!dto_waiting) {
246 pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
247 __func__,
248 le16_to_cpu(rpipe->descr.wRPipeIndex));
249 list_del_init(&rpipe->list_node);
250 }
251 }
252 spin_unlock_irqrestore(&wa->rpipe_lock, flags);
253}
254
255/* add this RPIPE to the end of the delayed RPIPE list. */
256static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
257{
258 unsigned long flags;
259
260 spin_lock_irqsave(&wa->rpipe_lock, flags);
261 /* add rpipe to the list if it is not already on it. */
262 if (list_empty(&rpipe->list_node)) {
263 pr_debug("%s: adding RPIPE %d to the delayed list.\n",
264 __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
265 list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
266 }
267 spin_unlock_irqrestore(&wa->rpipe_lock, flags);
268}
269
270/*
271 * xfer is referenced
272 *
273 * xfer->lock has to be unlocked
274 *
275 * We take xfer->lock for setting the result; this is a barrier
276 * against drivers/usb/core/hcd.c:unlink1() being called after we call
277 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
278 * reference to the transfer.
279 */
280static void wa_xfer_giveback(struct wa_xfer *xfer)
281{
282 unsigned long flags;
283
284 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
285 list_del_init(&xfer->list_node);
286 usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
287 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
288 /* FIXME: segmentation broken -- kills DWA */
289 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
290 wa_put(xfer->wa);
291 wa_xfer_put(xfer);
292}
293
294/*
295 * xfer is referenced
296 *
297 * xfer->lock has to be unlocked
298 */
299static void wa_xfer_completion(struct wa_xfer *xfer)
300{
301 if (xfer->wusb_dev)
302 wusb_dev_put(xfer->wusb_dev);
303 rpipe_put(xfer->ep->hcpriv);
304 wa_xfer_giveback(xfer);
305}
306
307/*
308 * Initialize a transfer's ID
309 *
310 * We need to use a sequential number; if we use the pointer or the
311 * hash of the pointer, it can repeat over sequential transfers and
312 * then it will confuse the HWA....wonder why in hell they put a 32
313 * bit handle in there then.
314 */
315static void wa_xfer_id_init(struct wa_xfer *xfer)
316{
317 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
318}
319
320/* Return the xfer's ID. */
321static inline u32 wa_xfer_id(struct wa_xfer *xfer)
322{
323 return xfer->id;
324}
325
326/* Return the xfer's ID in transport format (little endian). */
327static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
328{
329 return cpu_to_le32(xfer->id);
330}
331
332/*
333 * If transfer is done, wrap it up and return true
334 *
335 * xfer->lock has to be locked
336 */
337static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
338{
339 struct device *dev = &xfer->wa->usb_iface->dev;
340 unsigned result, cnt;
341 struct wa_seg *seg;
342 struct urb *urb = xfer->urb;
343 unsigned found_short = 0;
344
345 result = xfer->segs_done == xfer->segs_submitted;
346 if (result == 0)
347 goto out;
348 urb->actual_length = 0;
349 for (cnt = 0; cnt < xfer->segs; cnt++) {
350 seg = xfer->seg[cnt];
351 switch (seg->status) {
352 case WA_SEG_DONE:
353 if (found_short && seg->result > 0) {
354 dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
355 xfer, wa_xfer_id(xfer), cnt,
356 seg->result);
357 urb->status = -EINVAL;
358 goto out;
359 }
360 urb->actual_length += seg->result;
361 if (!(usb_pipeisoc(xfer->urb->pipe))
362 && seg->result < xfer->seg_size
363 && cnt != xfer->segs-1)
364 found_short = 1;
365 dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
366 "result %zu urb->actual_length %d\n",
367 xfer, wa_xfer_id(xfer), seg->index, found_short,
368 seg->result, urb->actual_length);
369 break;
370 case WA_SEG_ERROR:
371 xfer->result = seg->result;
372 dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
373 xfer, wa_xfer_id(xfer), seg->index, seg->result,
374 seg->result);
375 goto out;
376 case WA_SEG_ABORTED:
377 xfer->result = seg->result;
378 dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
379 xfer, wa_xfer_id(xfer), seg->index, seg->result,
380 seg->result);
381 goto out;
382 default:
383 dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
384 xfer, wa_xfer_id(xfer), cnt, seg->status);
385 xfer->result = -EINVAL;
386 goto out;
387 }
388 }
389 xfer->result = 0;
390out:
391 return result;
392}
393
394/*
395 * Mark the given segment as done. Return true if this completes the xfer.
396 * This should only be called for segs that have been submitted to an RPIPE.
397 * Delayed segs are not marked as submitted so they do not need to be marked
398 * as done when cleaning up.
399 *
400 * xfer->lock has to be locked
401 */
402static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
403 struct wa_seg *seg, enum wa_seg_status status)
404{
405 seg->status = status;
406 xfer->segs_done++;
407
408 /* check for done. */
409 return __wa_xfer_is_done(xfer);
410}
411
412/*
413 * Search for a transfer list ID on the HCD's URB list
414 *
415 * For 32 bit architectures, we use the pointer itself; for 64 bits, a
416 * 32-bit hash of the pointer.
417 *
418 * @returns NULL if not found.
419 */
420static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
421{
422 unsigned long flags;
423 struct wa_xfer *xfer_itr;
424 spin_lock_irqsave(&wa->xfer_list_lock, flags);
425 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
426 if (id == xfer_itr->id) {
427 wa_xfer_get(xfer_itr);
428 goto out;
429 }
430 }
431 xfer_itr = NULL;
432out:
433 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
434 return xfer_itr;
435}
436
437struct wa_xfer_abort_buffer {
438 struct urb urb;
439 struct wahc *wa;
440 struct wa_xfer_abort cmd;
441};
442
443static void __wa_xfer_abort_cb(struct urb *urb)
444{
445 struct wa_xfer_abort_buffer *b = urb->context;
446 struct wahc *wa = b->wa;
447
448 /*
449 * If the abort request URB failed, then the HWA did not get the abort
450 * command. Forcibly clean up the xfer without waiting for a Transfer
451 * Result from the HWA.
452 */
453 if (urb->status < 0) {
454 struct wa_xfer *xfer;
455 struct device *dev = &wa->usb_iface->dev;
456
457 xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
458 dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
459 __func__, urb->status);
460 if (xfer) {
461 unsigned long flags;
462 int done, seg_index = 0;
463 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
464
465 dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
466 __func__, xfer, wa_xfer_id(xfer));
467 spin_lock_irqsave(&xfer->lock, flags);
468 /* skip done segs. */
469 while (seg_index < xfer->segs) {
470 struct wa_seg *seg = xfer->seg[seg_index];
471
472 if ((seg->status == WA_SEG_DONE) ||
473 (seg->status == WA_SEG_ERROR)) {
474 ++seg_index;
475 } else {
476 break;
477 }
478 }
479 /* mark remaining segs as aborted. */
480 wa_complete_remaining_xfer_segs(xfer, seg_index,
481 WA_SEG_ABORTED);
482 done = __wa_xfer_is_done(xfer);
483 spin_unlock_irqrestore(&xfer->lock, flags);
484 if (done)
485 wa_xfer_completion(xfer);
486 wa_xfer_delayed_run(rpipe);
487 wa_xfer_put(xfer);
488 } else {
489 dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
490 __func__, le32_to_cpu(b->cmd.dwTransferID));
491 }
492 }
493
494 wa_put(wa); /* taken in __wa_xfer_abort */
495 usb_put_urb(&b->urb);
496}
497
498/*
499 * Aborts an ongoing transaction
500 *
501 * Assumes the transfer is referenced and locked and in a submitted
502 * state (mainly that there is an endpoint/rpipe assigned).
503 *
504 * The callback (see above) does nothing but freeing up the data by
505 * putting the URB. Because the URB is allocated at the head of the
506 * struct, the whole space we allocated is kfreed. *
507 */
508static int __wa_xfer_abort(struct wa_xfer *xfer)
509{
510 int result = -ENOMEM;
511 struct device *dev = &xfer->wa->usb_iface->dev;
512 struct wa_xfer_abort_buffer *b;
513 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
514
515 b = kmalloc(sizeof(*b), GFP_ATOMIC);
516 if (b == NULL)
517 goto error_kmalloc;
518 b->cmd.bLength = sizeof(b->cmd);
519 b->cmd.bRequestType = WA_XFER_ABORT;
520 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
521 b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
522 b->wa = wa_get(xfer->wa);
523
524 usb_init_urb(&b->urb);
525 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
526 usb_sndbulkpipe(xfer->wa->usb_dev,
527 xfer->wa->dto_epd->bEndpointAddress),
528 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
529 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
530 if (result < 0)
531 goto error_submit;
532 return result; /* callback frees! */
533
534
535error_submit:
536 wa_put(xfer->wa);
537 if (printk_ratelimit())
538 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
539 xfer, result);
540 kfree(b);
541error_kmalloc:
542 return result;
543
544}
545
546/*
547 * Calculate the number of isoc frames starting from isoc_frame_offset
548 * that will fit a in transfer segment.
549 */
550static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
551 int isoc_frame_offset, int *total_size)
552{
553 int segment_size = 0, frame_count = 0;
554 int index = isoc_frame_offset;
555 struct usb_iso_packet_descriptor *iso_frame_desc =
556 xfer->urb->iso_frame_desc;
557
558 while ((index < xfer->urb->number_of_packets)
559 && ((segment_size + iso_frame_desc[index].length)
560 <= xfer->seg_size)) {
561 /*
562 * For Alereon HWA devices, only include an isoc frame in an
563 * out segment if it is physically contiguous with the previous
564 * frame. This is required because those devices expect
565 * the isoc frames to be sent as a single USB transaction as
566 * opposed to one transaction per frame with standard HWA.
567 */
568 if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
569 && (xfer->is_inbound == 0)
570 && (index > isoc_frame_offset)
571 && ((iso_frame_desc[index - 1].offset +
572 iso_frame_desc[index - 1].length) !=
573 iso_frame_desc[index].offset))
574 break;
575
576 /* this frame fits. count it. */
577 ++frame_count;
578 segment_size += iso_frame_desc[index].length;
579
580 /* move to the next isoc frame. */
581 ++index;
582 }
583
584 *total_size = segment_size;
585 return frame_count;
586}
587
588/*
589 *
590 * @returns < 0 on error, transfer segment request size if ok
591 */
592static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
593 enum wa_xfer_type *pxfer_type)
594{
595 ssize_t result;
596 struct device *dev = &xfer->wa->usb_iface->dev;
597 size_t maxpktsize;
598 struct urb *urb = xfer->urb;
599 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
600
601 switch (rpipe->descr.bmAttribute & 0x3) {
602 case USB_ENDPOINT_XFER_CONTROL:
603 *pxfer_type = WA_XFER_TYPE_CTL;
604 result = sizeof(struct wa_xfer_ctl);
605 break;
606 case USB_ENDPOINT_XFER_INT:
607 case USB_ENDPOINT_XFER_BULK:
608 *pxfer_type = WA_XFER_TYPE_BI;
609 result = sizeof(struct wa_xfer_bi);
610 break;
611 case USB_ENDPOINT_XFER_ISOC:
612 *pxfer_type = WA_XFER_TYPE_ISO;
613 result = sizeof(struct wa_xfer_hwaiso);
614 break;
615 default:
616 /* never happens */
617 BUG();
618 result = -EINVAL; /* shut gcc up */
619 }
620 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
621 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
622
623 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
624 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
625 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
626 /* Compute the segment size and make sure it is a multiple of
627 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
628 * a check (FIXME) */
629 if (xfer->seg_size < maxpktsize) {
630 dev_err(dev,
631 "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
632 xfer->seg_size, maxpktsize);
633 result = -EINVAL;
634 goto error;
635 }
636 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
637 if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
638 int index = 0;
639
640 xfer->segs = 0;
641 /*
642 * loop over urb->number_of_packets to determine how many
643 * xfer segments will be needed to send the isoc frames.
644 */
645 while (index < urb->number_of_packets) {
646 int seg_size; /* don't care. */
647 index += __wa_seg_calculate_isoc_frame_count(xfer,
648 index, &seg_size);
649 ++xfer->segs;
650 }
651 } else {
652 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
653 xfer->seg_size);
654 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
655 xfer->segs = 1;
656 }
657
658 if (xfer->segs > WA_SEGS_MAX) {
659 dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
660 (urb->transfer_buffer_length/xfer->seg_size),
661 WA_SEGS_MAX);
662 result = -EINVAL;
663 goto error;
664 }
665error:
666 return result;
667}
668
669static void __wa_setup_isoc_packet_descr(
670 struct wa_xfer_packet_info_hwaiso *packet_desc,
671 struct wa_xfer *xfer,
672 struct wa_seg *seg) {
673 struct usb_iso_packet_descriptor *iso_frame_desc =
674 xfer->urb->iso_frame_desc;
675 int frame_index;
676
677 /* populate isoc packet descriptor. */
678 packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
679 packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
680 (sizeof(packet_desc->PacketLength[0]) *
681 seg->isoc_frame_count));
682 for (frame_index = 0; frame_index < seg->isoc_frame_count;
683 ++frame_index) {
684 int offset_index = frame_index + seg->isoc_frame_offset;
685 packet_desc->PacketLength[frame_index] =
686 cpu_to_le16(iso_frame_desc[offset_index].length);
687 }
688}
689
690
691/* Fill in the common request header and xfer-type specific data. */
692static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
693 struct wa_xfer_hdr *xfer_hdr0,
694 enum wa_xfer_type xfer_type,
695 size_t xfer_hdr_size)
696{
697 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
698 struct wa_seg *seg = xfer->seg[0];
699
700 xfer_hdr0 = &seg->xfer_hdr;
701 xfer_hdr0->bLength = xfer_hdr_size;
702 xfer_hdr0->bRequestType = xfer_type;
703 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
704 xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
705 xfer_hdr0->bTransferSegment = 0;
706 switch (xfer_type) {
707 case WA_XFER_TYPE_CTL: {
708 struct wa_xfer_ctl *xfer_ctl =
709 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
710 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
711 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
712 sizeof(xfer_ctl->baSetupData));
713 break;
714 }
715 case WA_XFER_TYPE_BI:
716 break;
717 case WA_XFER_TYPE_ISO: {
718 struct wa_xfer_hwaiso *xfer_iso =
719 container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
720 struct wa_xfer_packet_info_hwaiso *packet_desc =
721 ((void *)xfer_iso) + xfer_hdr_size;
722
723 /* populate the isoc section of the transfer request. */
724 xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
725 /* populate isoc packet descriptor. */
726 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
727 break;
728 }
729 default:
730 BUG();
731 };
732}
733
734/*
735 * Callback for the OUT data phase of the segment request
736 *
737 * Check wa_seg_tr_cb(); most comments also apply here because this
738 * function does almost the same thing and they work closely
739 * together.
740 *
741 * If the seg request has failed but this DTO phase has succeeded,
742 * wa_seg_tr_cb() has already failed the segment and moved the
743 * status to WA_SEG_ERROR, so this will go through 'case 0' and
744 * effectively do nothing.
745 */
746static void wa_seg_dto_cb(struct urb *urb)
747{
748 struct wa_seg *seg = urb->context;
749 struct wa_xfer *xfer = seg->xfer;
750 struct wahc *wa;
751 struct device *dev;
752 struct wa_rpipe *rpipe;
753 unsigned long flags;
754 unsigned rpipe_ready = 0;
755 int data_send_done = 1, release_dto = 0, holding_dto = 0;
756 u8 done = 0;
757 int result;
758
759 /* free the sg if it was used. */
760 kfree(urb->sg);
761 urb->sg = NULL;
762
763 spin_lock_irqsave(&xfer->lock, flags);
764 wa = xfer->wa;
765 dev = &wa->usb_iface->dev;
766 if (usb_pipeisoc(xfer->urb->pipe)) {
767 /* Alereon HWA sends all isoc frames in a single transfer. */
768 if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
769 seg->isoc_frame_index += seg->isoc_frame_count;
770 else
771 seg->isoc_frame_index += 1;
772 if (seg->isoc_frame_index < seg->isoc_frame_count) {
773 data_send_done = 0;
774 holding_dto = 1; /* checked in error cases. */
775 /*
776 * if this is the last isoc frame of the segment, we
777 * can release DTO after sending this frame.
778 */
779 if ((seg->isoc_frame_index + 1) >=
780 seg->isoc_frame_count)
781 release_dto = 1;
782 }
783 dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
784 wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
785 holding_dto, release_dto);
786 }
787 spin_unlock_irqrestore(&xfer->lock, flags);
788
789 switch (urb->status) {
790 case 0:
791 spin_lock_irqsave(&xfer->lock, flags);
792 seg->result += urb->actual_length;
793 if (data_send_done) {
794 dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
795 wa_xfer_id(xfer), seg->index, seg->result);
796 if (seg->status < WA_SEG_PENDING)
797 seg->status = WA_SEG_PENDING;
798 } else {
799 /* should only hit this for isoc xfers. */
800 /*
801 * Populate the dto URB with the next isoc frame buffer,
802 * send the URB and release DTO if we no longer need it.
803 */
804 __wa_populate_dto_urb_isoc(xfer, seg,
805 seg->isoc_frame_offset + seg->isoc_frame_index);
806
807 /* resubmit the URB with the next isoc frame. */
808 /* take a ref on resubmit. */
809 wa_xfer_get(xfer);
810 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
811 if (result < 0) {
812 dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
813 wa_xfer_id(xfer), seg->index, result);
814 spin_unlock_irqrestore(&xfer->lock, flags);
815 goto error_dto_submit;
816 }
817 }
818 spin_unlock_irqrestore(&xfer->lock, flags);
819 if (release_dto) {
820 __wa_dto_put(wa);
821 wa_check_for_delayed_rpipes(wa);
822 }
823 break;
824 case -ECONNRESET: /* URB unlinked; no need to do anything */
825 case -ENOENT: /* as it was done by the who unlinked us */
826 if (holding_dto) {
827 __wa_dto_put(wa);
828 wa_check_for_delayed_rpipes(wa);
829 }
830 break;
831 default: /* Other errors ... */
832 dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
833 wa_xfer_id(xfer), seg->index, urb->status);
834 goto error_default;
835 }
836
837 /* taken when this URB was submitted. */
838 wa_xfer_put(xfer);
839 return;
840
841error_dto_submit:
842 /* taken on resubmit attempt. */
843 wa_xfer_put(xfer);
844error_default:
845 spin_lock_irqsave(&xfer->lock, flags);
846 rpipe = xfer->ep->hcpriv;
847 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
848 EDC_ERROR_TIMEFRAME)){
849 dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
850 wa_reset_all(wa);
851 }
852 if (seg->status != WA_SEG_ERROR) {
853 seg->result = urb->status;
854 __wa_xfer_abort(xfer);
855 rpipe_ready = rpipe_avail_inc(rpipe);
856 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
857 }
858 spin_unlock_irqrestore(&xfer->lock, flags);
859 if (holding_dto) {
860 __wa_dto_put(wa);
861 wa_check_for_delayed_rpipes(wa);
862 }
863 if (done)
864 wa_xfer_completion(xfer);
865 if (rpipe_ready)
866 wa_xfer_delayed_run(rpipe);
867 /* taken when this URB was submitted. */
868 wa_xfer_put(xfer);
869}
870
871/*
872 * Callback for the isoc packet descriptor phase of the segment request
873 *
874 * Check wa_seg_tr_cb(); most comments also apply here because this
875 * function does almost the same thing and they work closely
876 * together.
877 *
878 * If the seg request has failed but this phase has succeeded,
879 * wa_seg_tr_cb() has already failed the segment and moved the
880 * status to WA_SEG_ERROR, so this will go through 'case 0' and
881 * effectively do nothing.
882 */
883static void wa_seg_iso_pack_desc_cb(struct urb *urb)
884{
885 struct wa_seg *seg = urb->context;
886 struct wa_xfer *xfer = seg->xfer;
887 struct wahc *wa;
888 struct device *dev;
889 struct wa_rpipe *rpipe;
890 unsigned long flags;
891 unsigned rpipe_ready = 0;
892 u8 done = 0;
893
894 switch (urb->status) {
895 case 0:
896 spin_lock_irqsave(&xfer->lock, flags);
897 wa = xfer->wa;
898 dev = &wa->usb_iface->dev;
899 dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
900 wa_xfer_id(xfer), seg->index);
901 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
902 seg->status = WA_SEG_PENDING;
903 spin_unlock_irqrestore(&xfer->lock, flags);
904 break;
905 case -ECONNRESET: /* URB unlinked; no need to do anything */
906 case -ENOENT: /* as it was done by the who unlinked us */
907 break;
908 default: /* Other errors ... */
909 spin_lock_irqsave(&xfer->lock, flags);
910 wa = xfer->wa;
911 dev = &wa->usb_iface->dev;
912 rpipe = xfer->ep->hcpriv;
913 pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
914 wa_xfer_id(xfer), seg->index, urb->status);
915 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
916 EDC_ERROR_TIMEFRAME)){
917 dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
918 wa_reset_all(wa);
919 }
920 if (seg->status != WA_SEG_ERROR) {
921 usb_unlink_urb(seg->dto_urb);
922 seg->result = urb->status;
923 __wa_xfer_abort(xfer);
924 rpipe_ready = rpipe_avail_inc(rpipe);
925 done = __wa_xfer_mark_seg_as_done(xfer, seg,
926 WA_SEG_ERROR);
927 }
928 spin_unlock_irqrestore(&xfer->lock, flags);
929 if (done)
930 wa_xfer_completion(xfer);
931 if (rpipe_ready)
932 wa_xfer_delayed_run(rpipe);
933 }
934 /* taken when this URB was submitted. */
935 wa_xfer_put(xfer);
936}
937
938/*
939 * Callback for the segment request
940 *
941 * If successful transition state (unless already transitioned or
942 * outbound transfer); otherwise, take a note of the error, mark this
943 * segment done and try completion.
944 *
945 * Note we don't access until we are sure that the transfer hasn't
946 * been cancelled (ECONNRESET, ENOENT), which could mean that
947 * seg->xfer could be already gone.
948 *
949 * We have to check before setting the status to WA_SEG_PENDING
950 * because sometimes the xfer result callback arrives before this
951 * callback (geeeeeeze), so it might happen that we are already in
952 * another state. As well, we don't set it if the transfer is not inbound,
953 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
954 * finishes.
955 */
956static void wa_seg_tr_cb(struct urb *urb)
957{
958 struct wa_seg *seg = urb->context;
959 struct wa_xfer *xfer = seg->xfer;
960 struct wahc *wa;
961 struct device *dev;
962 struct wa_rpipe *rpipe;
963 unsigned long flags;
964 unsigned rpipe_ready;
965 u8 done = 0;
966
967 switch (urb->status) {
968 case 0:
969 spin_lock_irqsave(&xfer->lock, flags);
970 wa = xfer->wa;
971 dev = &wa->usb_iface->dev;
972 dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
973 xfer, wa_xfer_id(xfer), seg->index);
974 if (xfer->is_inbound &&
975 seg->status < WA_SEG_PENDING &&
976 !(usb_pipeisoc(xfer->urb->pipe)))
977 seg->status = WA_SEG_PENDING;
978 spin_unlock_irqrestore(&xfer->lock, flags);
979 break;
980 case -ECONNRESET: /* URB unlinked; no need to do anything */
981 case -ENOENT: /* as it was done by the who unlinked us */
982 break;
983 default: /* Other errors ... */
984 spin_lock_irqsave(&xfer->lock, flags);
985 wa = xfer->wa;
986 dev = &wa->usb_iface->dev;
987 rpipe = xfer->ep->hcpriv;
988 if (printk_ratelimit())
989 dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
990 xfer, wa_xfer_id(xfer), seg->index,
991 urb->status);
992 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
993 EDC_ERROR_TIMEFRAME)){
994 dev_err(dev, "DTO: URB max acceptable errors "
995 "exceeded, resetting device\n");
996 wa_reset_all(wa);
997 }
998 usb_unlink_urb(seg->isoc_pack_desc_urb);
999 usb_unlink_urb(seg->dto_urb);
1000 seg->result = urb->status;
1001 __wa_xfer_abort(xfer);
1002 rpipe_ready = rpipe_avail_inc(rpipe);
1003 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
1004 spin_unlock_irqrestore(&xfer->lock, flags);
1005 if (done)
1006 wa_xfer_completion(xfer);
1007 if (rpipe_ready)
1008 wa_xfer_delayed_run(rpipe);
1009 }
1010 /* taken when this URB was submitted. */
1011 wa_xfer_put(xfer);
1012}
1013
1014/*
1015 * Allocate an SG list to store bytes_to_transfer bytes and copy the
1016 * subset of the in_sg that matches the buffer subset
1017 * we are about to transfer.
1018 */
1019static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
1020 const unsigned int bytes_transferred,
1021 const unsigned int bytes_to_transfer, int *out_num_sgs)
1022{
1023 struct scatterlist *out_sg;
1024 unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
1025 nents;
1026 struct scatterlist *current_xfer_sg = in_sg;
1027 struct scatterlist *current_seg_sg, *last_seg_sg;
1028
1029 /* skip previously transferred pages. */
1030 while ((current_xfer_sg) &&
1031 (bytes_processed < bytes_transferred)) {
1032 bytes_processed += current_xfer_sg->length;
1033
1034 /* advance the sg if current segment starts on or past the
1035 next page. */
1036 if (bytes_processed <= bytes_transferred)
1037 current_xfer_sg = sg_next(current_xfer_sg);
1038 }
1039
1040 /* the data for the current segment starts in current_xfer_sg.
1041 calculate the offset. */
1042 if (bytes_processed > bytes_transferred) {
1043 offset_into_current_page_data = current_xfer_sg->length -
1044 (bytes_processed - bytes_transferred);
1045 }
1046
1047 /* calculate the number of pages needed by this segment. */
1048 nents = DIV_ROUND_UP((bytes_to_transfer +
1049 offset_into_current_page_data +
1050 current_xfer_sg->offset),
1051 PAGE_SIZE);
1052
1053 out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
1054 if (out_sg) {
1055 sg_init_table(out_sg, nents);
1056
1057 /* copy the portion of the incoming SG that correlates to the
1058 * data to be transferred by this segment to the segment SG. */
1059 last_seg_sg = current_seg_sg = out_sg;
1060 bytes_processed = 0;
1061
1062 /* reset nents and calculate the actual number of sg entries
1063 needed. */
1064 nents = 0;
1065 while ((bytes_processed < bytes_to_transfer) &&
1066 current_seg_sg && current_xfer_sg) {
1067 unsigned int page_len = min((current_xfer_sg->length -
1068 offset_into_current_page_data),
1069 (bytes_to_transfer - bytes_processed));
1070
1071 sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
1072 page_len,
1073 current_xfer_sg->offset +
1074 offset_into_current_page_data);
1075
1076 bytes_processed += page_len;
1077
1078 last_seg_sg = current_seg_sg;
1079 current_seg_sg = sg_next(current_seg_sg);
1080 current_xfer_sg = sg_next(current_xfer_sg);
1081
1082 /* only the first page may require additional offset. */
1083 offset_into_current_page_data = 0;
1084 nents++;
1085 }
1086
1087 /* update num_sgs and terminate the list since we may have
1088 * concatenated pages. */
1089 sg_mark_end(last_seg_sg);
1090 *out_num_sgs = nents;
1091 }
1092
1093 return out_sg;
1094}
1095
1096/*
1097 * Populate DMA buffer info for the isoc dto urb.
1098 */
1099static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
1100 struct wa_seg *seg, int curr_iso_frame)
1101{
1102 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1103 seg->dto_urb->sg = NULL;
1104 seg->dto_urb->num_sgs = 0;
1105 /* dto urb buffer address pulled from iso_frame_desc. */
1106 seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
1107 xfer->urb->iso_frame_desc[curr_iso_frame].offset;
1108 /* The Alereon HWA sends a single URB with all isoc segs. */
1109 if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
1110 seg->dto_urb->transfer_buffer_length = seg->isoc_size;
1111 else
1112 seg->dto_urb->transfer_buffer_length =
1113 xfer->urb->iso_frame_desc[curr_iso_frame].length;
1114}
1115
1116/*
1117 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
1118 */
1119static int __wa_populate_dto_urb(struct wa_xfer *xfer,
1120 struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
1121{
1122 int result = 0;
1123
1124 if (xfer->is_dma) {
1125 seg->dto_urb->transfer_dma =
1126 xfer->urb->transfer_dma + buf_itr_offset;
1127 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1128 seg->dto_urb->sg = NULL;
1129 seg->dto_urb->num_sgs = 0;
1130 } else {
1131 /* do buffer or SG processing. */
1132 seg->dto_urb->transfer_flags &=
1133 ~URB_NO_TRANSFER_DMA_MAP;
1134 /* this should always be 0 before a resubmit. */
1135 seg->dto_urb->num_mapped_sgs = 0;
1136
1137 if (xfer->urb->transfer_buffer) {
1138 seg->dto_urb->transfer_buffer =
1139 xfer->urb->transfer_buffer +
1140 buf_itr_offset;
1141 seg->dto_urb->sg = NULL;
1142 seg->dto_urb->num_sgs = 0;
1143 } else {
1144 seg->dto_urb->transfer_buffer = NULL;
1145
1146 /*
1147 * allocate an SG list to store seg_size bytes
1148 * and copy the subset of the xfer->urb->sg that
1149 * matches the buffer subset we are about to
1150 * read.
1151 */
1152 seg->dto_urb->sg = wa_xfer_create_subset_sg(
1153 xfer->urb->sg,
1154 buf_itr_offset, buf_itr_size,
1155 &(seg->dto_urb->num_sgs));
1156 if (!(seg->dto_urb->sg))
1157 result = -ENOMEM;
1158 }
1159 }
1160 seg->dto_urb->transfer_buffer_length = buf_itr_size;
1161
1162 return result;
1163}
1164
1165/*
1166 * Allocate the segs array and initialize each of them
1167 *
1168 * The segments are freed by wa_xfer_destroy() when the xfer use count
1169 * drops to zero; however, because each segment is given the same life
1170 * cycle as the USB URB it contains, it is actually freed by
1171 * usb_put_urb() on the contained USB URB (twisted, eh?).
1172 */
1173static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
1174{
1175 int result, cnt, isoc_frame_offset = 0;
1176 size_t alloc_size = sizeof(*xfer->seg[0])
1177 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
1178 struct usb_device *usb_dev = xfer->wa->usb_dev;
1179 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
1180 struct wa_seg *seg;
1181 size_t buf_itr, buf_size, buf_itr_size;
1182
1183 result = -ENOMEM;
1184 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
1185 if (xfer->seg == NULL)
1186 goto error_segs_kzalloc;
1187 buf_itr = 0;
1188 buf_size = xfer->urb->transfer_buffer_length;
1189 for (cnt = 0; cnt < xfer->segs; cnt++) {
1190 size_t iso_pkt_descr_size = 0;
1191 int seg_isoc_frame_count = 0, seg_isoc_size = 0;
1192
1193 /*
1194 * Adjust the size of the segment object to contain space for
1195 * the isoc packet descriptor buffer.
1196 */
1197 if (usb_pipeisoc(xfer->urb->pipe)) {
1198 seg_isoc_frame_count =
1199 __wa_seg_calculate_isoc_frame_count(xfer,
1200 isoc_frame_offset, &seg_isoc_size);
1201
1202 iso_pkt_descr_size =
1203 sizeof(struct wa_xfer_packet_info_hwaiso) +
1204 (seg_isoc_frame_count * sizeof(__le16));
1205 }
1206 result = -ENOMEM;
1207 seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
1208 GFP_ATOMIC);
1209 if (seg == NULL)
1210 goto error_seg_kmalloc;
1211 wa_seg_init(seg);
1212 seg->xfer = xfer;
1213 seg->index = cnt;
1214 usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
1215 usb_sndbulkpipe(usb_dev,
1216 dto_epd->bEndpointAddress),
1217 &seg->xfer_hdr, xfer_hdr_size,
1218 wa_seg_tr_cb, seg);
1219 buf_itr_size = min(buf_size, xfer->seg_size);
1220
1221 if (usb_pipeisoc(xfer->urb->pipe)) {
1222 seg->isoc_frame_count = seg_isoc_frame_count;
1223 seg->isoc_frame_offset = isoc_frame_offset;
1224 seg->isoc_size = seg_isoc_size;
1225 /* iso packet descriptor. */
1226 seg->isoc_pack_desc_urb =
1227 usb_alloc_urb(0, GFP_ATOMIC);
1228 if (seg->isoc_pack_desc_urb == NULL)
1229 goto error_iso_pack_desc_alloc;
1230 /*
1231 * The buffer for the isoc packet descriptor starts
1232 * after the transfer request header in the
1233 * segment object memory buffer.
1234 */
1235 usb_fill_bulk_urb(
1236 seg->isoc_pack_desc_urb, usb_dev,
1237 usb_sndbulkpipe(usb_dev,
1238 dto_epd->bEndpointAddress),
1239 (void *)(&seg->xfer_hdr) +
1240 xfer_hdr_size,
1241 iso_pkt_descr_size,
1242 wa_seg_iso_pack_desc_cb, seg);
1243
1244 /* adjust starting frame offset for next seg. */
1245 isoc_frame_offset += seg_isoc_frame_count;
1246 }
1247
1248 if (xfer->is_inbound == 0 && buf_size > 0) {
1249 /* outbound data. */
1250 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
1251 if (seg->dto_urb == NULL)
1252 goto error_dto_alloc;
1253 usb_fill_bulk_urb(
1254 seg->dto_urb, usb_dev,
1255 usb_sndbulkpipe(usb_dev,
1256 dto_epd->bEndpointAddress),
1257 NULL, 0, wa_seg_dto_cb, seg);
1258
1259 if (usb_pipeisoc(xfer->urb->pipe)) {
1260 /*
1261 * Fill in the xfer buffer information for the
1262 * first isoc frame. Subsequent frames in this
1263 * segment will be filled in and sent from the
1264 * DTO completion routine, if needed.
1265 */
1266 __wa_populate_dto_urb_isoc(xfer, seg,
1267 seg->isoc_frame_offset);
1268 } else {
1269 /* fill in the xfer buffer information. */
1270 result = __wa_populate_dto_urb(xfer, seg,
1271 buf_itr, buf_itr_size);
1272 if (result < 0)
1273 goto error_seg_outbound_populate;
1274
1275 buf_itr += buf_itr_size;
1276 buf_size -= buf_itr_size;
1277 }
1278 }
1279 seg->status = WA_SEG_READY;
1280 }
1281 return 0;
1282
1283 /*
1284 * Free the memory for the current segment which failed to init.
1285 * Use the fact that cnt is left at were it failed. The remaining
1286 * segments will be cleaned up by wa_xfer_destroy.
1287 */
1288error_seg_outbound_populate:
1289 usb_free_urb(xfer->seg[cnt]->dto_urb);
1290error_dto_alloc:
1291 usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
1292error_iso_pack_desc_alloc:
1293 kfree(xfer->seg[cnt]);
1294 xfer->seg[cnt] = NULL;
1295error_seg_kmalloc:
1296error_segs_kzalloc:
1297 return result;
1298}
1299
1300/*
1301 * Allocates all the stuff needed to submit a transfer
1302 *
1303 * Breaks the whole data buffer in a list of segments, each one has a
1304 * structure allocated to it and linked in xfer->seg[index]
1305 *
1306 * FIXME: merge setup_segs() and the last part of this function, no
1307 * need to do two for loops when we could run everything in a
1308 * single one
1309 */
1310static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
1311{
1312 int result;
1313 struct device *dev = &xfer->wa->usb_iface->dev;
1314 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
1315 size_t xfer_hdr_size, cnt, transfer_size;
1316 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
1317
1318 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
1319 if (result < 0)
1320 goto error_setup_sizes;
1321 xfer_hdr_size = result;
1322 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
1323 if (result < 0) {
1324 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
1325 xfer, xfer->segs, result);
1326 goto error_setup_segs;
1327 }
1328 /* Fill the first header */
1329 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
1330 wa_xfer_id_init(xfer);
1331 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
1332
1333 /* Fill remaining headers */
1334 xfer_hdr = xfer_hdr0;
1335 if (xfer_type == WA_XFER_TYPE_ISO) {
1336 xfer_hdr0->dwTransferLength =
1337 cpu_to_le32(xfer->seg[0]->isoc_size);
1338 for (cnt = 1; cnt < xfer->segs; cnt++) {
1339 struct wa_xfer_packet_info_hwaiso *packet_desc;
1340 struct wa_seg *seg = xfer->seg[cnt];
1341 struct wa_xfer_hwaiso *xfer_iso;
1342
1343 xfer_hdr = &seg->xfer_hdr;
1344 xfer_iso = container_of(xfer_hdr,
1345 struct wa_xfer_hwaiso, hdr);
1346 packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
1347 /*
1348 * Copy values from the 0th header. Segment specific
1349 * values are set below.
1350 */
1351 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1352 xfer_hdr->bTransferSegment = cnt;
1353 xfer_hdr->dwTransferLength =
1354 cpu_to_le32(seg->isoc_size);
1355 xfer_iso->dwNumOfPackets =
1356 cpu_to_le32(seg->isoc_frame_count);
1357 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
1358 seg->status = WA_SEG_READY;
1359 }
1360 } else {
1361 transfer_size = urb->transfer_buffer_length;
1362 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1363 cpu_to_le32(xfer->seg_size) :
1364 cpu_to_le32(transfer_size);
1365 transfer_size -= xfer->seg_size;
1366 for (cnt = 1; cnt < xfer->segs; cnt++) {
1367 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1368 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1369 xfer_hdr->bTransferSegment = cnt;
1370 xfer_hdr->dwTransferLength =
1371 transfer_size > xfer->seg_size ?
1372 cpu_to_le32(xfer->seg_size)
1373 : cpu_to_le32(transfer_size);
1374 xfer->seg[cnt]->status = WA_SEG_READY;
1375 transfer_size -= xfer->seg_size;
1376 }
1377 }
1378 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
1379 result = 0;
1380error_setup_segs:
1381error_setup_sizes:
1382 return result;
1383}
1384
1385/*
1386 *
1387 *
1388 * rpipe->seg_lock is held!
1389 */
1390static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
1391 struct wa_seg *seg, int *dto_done)
1392{
1393 int result;
1394
1395 /* default to done unless we encounter a multi-frame isoc segment. */
1396 *dto_done = 1;
1397
1398 /*
1399 * Take a ref for each segment urb so the xfer cannot disappear until
1400 * all of the callbacks run.
1401 */
1402 wa_xfer_get(xfer);
1403 /* submit the transfer request. */
1404 seg->status = WA_SEG_SUBMITTED;
1405 result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
1406 if (result < 0) {
1407 pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
1408 __func__, xfer, seg->index, result);
1409 wa_xfer_put(xfer);
1410 goto error_tr_submit;
1411 }
1412 /* submit the isoc packet descriptor if present. */
1413 if (seg->isoc_pack_desc_urb) {
1414 wa_xfer_get(xfer);
1415 result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1416 seg->isoc_frame_index = 0;
1417 if (result < 0) {
1418 pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1419 __func__, xfer, seg->index, result);
1420 wa_xfer_put(xfer);
1421 goto error_iso_pack_desc_submit;
1422 }
1423 }
1424 /* submit the out data if this is an out request. */
1425 if (seg->dto_urb) {
1426 struct wahc *wa = xfer->wa;
1427 wa_xfer_get(xfer);
1428 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
1429 if (result < 0) {
1430 pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
1431 __func__, xfer, seg->index, result);
1432 wa_xfer_put(xfer);
1433 goto error_dto_submit;
1434 }
1435 /*
1436 * If this segment contains more than one isoc frame, hold
1437 * onto the dto resource until we send all frames.
1438 * Only applies to non-Alereon devices.
1439 */
1440 if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
1441 && (seg->isoc_frame_count > 1))
1442 *dto_done = 0;
1443 }
1444 rpipe_avail_dec(rpipe);
1445 return 0;
1446
1447error_dto_submit:
1448 usb_unlink_urb(seg->isoc_pack_desc_urb);
1449error_iso_pack_desc_submit:
1450 usb_unlink_urb(&seg->tr_urb);
1451error_tr_submit:
1452 seg->status = WA_SEG_ERROR;
1453 seg->result = result;
1454 *dto_done = 1;
1455 return result;
1456}
1457
1458/*
1459 * Execute more queued request segments until the maximum concurrent allowed.
1460 * Return true if the DTO resource was acquired and released.
1461 *
1462 * The ugly unlock/lock sequence on the error path is needed as the
1463 * xfer->lock normally nests the seg_lock and not viceversa.
1464 */
1465static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
1466{
1467 int result, dto_acquired = 0, dto_done = 0;
1468 struct device *dev = &rpipe->wa->usb_iface->dev;
1469 struct wa_seg *seg;
1470 struct wa_xfer *xfer;
1471 unsigned long flags;
1472
1473 *dto_waiting = 0;
1474
1475 spin_lock_irqsave(&rpipe->seg_lock, flags);
1476 while (atomic_read(&rpipe->segs_available) > 0
1477 && !list_empty(&rpipe->seg_list)
1478 && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
1479 seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
1480 list_node);
1481 list_del(&seg->list_node);
1482 xfer = seg->xfer;
1483 /*
1484 * Get a reference to the xfer in case the callbacks for the
1485 * URBs submitted by __wa_seg_submit attempt to complete
1486 * the xfer before this function completes.
1487 */
1488 wa_xfer_get(xfer);
1489 result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
1490 /* release the dto resource if this RPIPE is done with it. */
1491 if (dto_done)
1492 __wa_dto_put(rpipe->wa);
1493 dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
1494 xfer, wa_xfer_id(xfer), seg->index,
1495 atomic_read(&rpipe->segs_available), result);
1496 if (unlikely(result < 0)) {
1497 int done;
1498
1499 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1500 spin_lock_irqsave(&xfer->lock, flags);
1501 __wa_xfer_abort(xfer);
1502 /*
1503 * This seg was marked as submitted when it was put on
1504 * the RPIPE seg_list. Mark it done.
1505 */
1506 xfer->segs_done++;
1507 done = __wa_xfer_is_done(xfer);
1508 spin_unlock_irqrestore(&xfer->lock, flags);
1509 if (done)
1510 wa_xfer_completion(xfer);
1511 spin_lock_irqsave(&rpipe->seg_lock, flags);
1512 }
1513 wa_xfer_put(xfer);
1514 }
1515 /*
1516 * Mark this RPIPE as waiting if dto was not acquired, there are
1517 * delayed segs and no active transfers to wake us up later.
1518 */
1519 if (!dto_acquired && !list_empty(&rpipe->seg_list)
1520 && (atomic_read(&rpipe->segs_available) ==
1521 le16_to_cpu(rpipe->descr.wRequests)))
1522 *dto_waiting = 1;
1523
1524 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1525
1526 return dto_done;
1527}
1528
1529static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
1530{
1531 int dto_waiting;
1532 int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
1533
1534 /*
1535 * If this RPIPE is waiting on the DTO resource, add it to the tail of
1536 * the waiting list.
1537 * Otherwise, if the WA DTO resource was acquired and released by
1538 * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
1539 * DTO and failed during that time. Check the delayed list and process
1540 * any waiters. Start searching from the next RPIPE index.
1541 */
1542 if (dto_waiting)
1543 wa_add_delayed_rpipe(rpipe->wa, rpipe);
1544 else if (dto_done)
1545 wa_check_for_delayed_rpipes(rpipe->wa);
1546}
1547
1548/*
1549 *
1550 * xfer->lock is taken
1551 *
1552 * On failure submitting we just stop submitting and return error;
1553 * wa_urb_enqueue_b() will execute the completion path
1554 */
1555static int __wa_xfer_submit(struct wa_xfer *xfer)
1556{
1557 int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1558 struct wahc *wa = xfer->wa;
1559 struct device *dev = &wa->usb_iface->dev;
1560 unsigned cnt;
1561 struct wa_seg *seg;
1562 unsigned long flags;
1563 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1564 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1565 u8 available;
1566 u8 empty;
1567
1568 spin_lock_irqsave(&wa->xfer_list_lock, flags);
1569 list_add_tail(&xfer->list_node, &wa->xfer_list);
1570 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1571
1572 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1573 result = 0;
1574 spin_lock_irqsave(&rpipe->seg_lock, flags);
1575 for (cnt = 0; cnt < xfer->segs; cnt++) {
1576 int delay_seg = 1;
1577
1578 available = atomic_read(&rpipe->segs_available);
1579 empty = list_empty(&rpipe->seg_list);
1580 seg = xfer->seg[cnt];
1581 if (available && empty) {
1582 /*
1583 * Only attempt to acquire DTO if we have a segment
1584 * to send.
1585 */
1586 dto_acquired = __wa_dto_try_get(rpipe->wa);
1587 if (dto_acquired) {
1588 delay_seg = 0;
1589 result = __wa_seg_submit(rpipe, xfer, seg,
1590 &dto_done);
1591 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1592 xfer, wa_xfer_id(xfer), cnt, available,
1593 empty);
1594 if (dto_done)
1595 __wa_dto_put(rpipe->wa);
1596
1597 if (result < 0) {
1598 __wa_xfer_abort(xfer);
1599 goto error_seg_submit;
1600 }
1601 }
1602 }
1603
1604 if (delay_seg) {
1605 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1606 xfer, wa_xfer_id(xfer), cnt, available, empty);
1607 seg->status = WA_SEG_DELAYED;
1608 list_add_tail(&seg->list_node, &rpipe->seg_list);
1609 }
1610 xfer->segs_submitted++;
1611 }
1612error_seg_submit:
1613 /*
1614 * Mark this RPIPE as waiting if dto was not acquired, there are
1615 * delayed segs and no active transfers to wake us up later.
1616 */
1617 if (!dto_acquired && !list_empty(&rpipe->seg_list)
1618 && (atomic_read(&rpipe->segs_available) ==
1619 le16_to_cpu(rpipe->descr.wRequests)))
1620 dto_waiting = 1;
1621 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1622
1623 if (dto_waiting)
1624 wa_add_delayed_rpipe(rpipe->wa, rpipe);
1625 else if (dto_done)
1626 wa_check_for_delayed_rpipes(rpipe->wa);
1627
1628 return result;
1629}
1630
1631/*
1632 * Second part of a URB/transfer enqueuement
1633 *
1634 * Assumes this comes from wa_urb_enqueue() [maybe through
1635 * wa_urb_enqueue_run()]. At this point:
1636 *
1637 * xfer->wa filled and refcounted
1638 * xfer->ep filled with rpipe refcounted if
1639 * delayed == 0
1640 * xfer->urb filled and refcounted (this is the case when called
1641 * from wa_urb_enqueue() as we come from usb_submit_urb()
1642 * and when called by wa_urb_enqueue_run(), as we took an
1643 * extra ref dropped by _run() after we return).
1644 * xfer->gfp filled
1645 *
1646 * If we fail at __wa_xfer_submit(), then we just check if we are done
1647 * and if so, we run the completion procedure. However, if we are not
1648 * yet done, we do nothing and wait for the completion handlers from
1649 * the submitted URBs or from the xfer-result path to kick in. If xfer
1650 * result never kicks in, the xfer will timeout from the USB code and
1651 * dequeue() will be called.
1652 */
1653static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1654{
1655 int result;
1656 unsigned long flags;
1657 struct urb *urb = xfer->urb;
1658 struct wahc *wa = xfer->wa;
1659 struct wusbhc *wusbhc = wa->wusb;
1660 struct wusb_dev *wusb_dev;
1661 unsigned done;
1662
1663 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1664 if (result < 0) {
1665 pr_err("%s: error_rpipe_get\n", __func__);
1666 goto error_rpipe_get;
1667 }
1668 result = -ENODEV;
1669 /* FIXME: segmentation broken -- kills DWA */
1670 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
1671 if (urb->dev == NULL) {
1672 mutex_unlock(&wusbhc->mutex);
1673 pr_err("%s: error usb dev gone\n", __func__);
1674 goto error_dev_gone;
1675 }
1676 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1677 if (wusb_dev == NULL) {
1678 mutex_unlock(&wusbhc->mutex);
1679 dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
1680 __func__);
1681 goto error_dev_gone;
1682 }
1683 mutex_unlock(&wusbhc->mutex);
1684
1685 spin_lock_irqsave(&xfer->lock, flags);
1686 xfer->wusb_dev = wusb_dev;
1687 result = urb->status;
1688 if (urb->status != -EINPROGRESS) {
1689 dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
1690 goto error_dequeued;
1691 }
1692
1693 result = __wa_xfer_setup(xfer, urb);
1694 if (result < 0) {
1695 dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
1696 goto error_xfer_setup;
1697 }
1698 /*
1699 * Get a xfer reference since __wa_xfer_submit starts asynchronous
1700 * operations that may try to complete the xfer before this function
1701 * exits.
1702 */
1703 wa_xfer_get(xfer);
1704 result = __wa_xfer_submit(xfer);
1705 if (result < 0) {
1706 dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
1707 goto error_xfer_submit;
1708 }
1709 spin_unlock_irqrestore(&xfer->lock, flags);
1710 wa_xfer_put(xfer);
1711 return 0;
1712
1713 /*
1714 * this is basically wa_xfer_completion() broken up wa_xfer_giveback()
1715 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
1716 * setup().
1717 */
1718error_xfer_setup:
1719error_dequeued:
1720 spin_unlock_irqrestore(&xfer->lock, flags);
1721 /* FIXME: segmentation broken, kills DWA */
1722 if (wusb_dev)
1723 wusb_dev_put(wusb_dev);
1724error_dev_gone:
1725 rpipe_put(xfer->ep->hcpriv);
1726error_rpipe_get:
1727 xfer->result = result;
1728 return result;
1729
1730error_xfer_submit:
1731 done = __wa_xfer_is_done(xfer);
1732 xfer->result = result;
1733 spin_unlock_irqrestore(&xfer->lock, flags);
1734 if (done)
1735 wa_xfer_completion(xfer);
1736 wa_xfer_put(xfer);
1737 /* return success since the completion routine will run. */
1738 return 0;
1739}
1740
1741/*
1742 * Execute the delayed transfers in the Wire Adapter @wa
1743 *
1744 * We need to be careful here, as dequeue() could be called in the
1745 * middle. That's why we do the whole thing under the
1746 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1747 * and then checks the list -- so as we would be acquiring in inverse
1748 * order, we move the delayed list to a separate list while locked and then
1749 * submit them without the list lock held.
1750 */
1751void wa_urb_enqueue_run(struct work_struct *ws)
1752{
1753 struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1754 struct wa_xfer *xfer, *next;
1755 struct urb *urb;
1756 LIST_HEAD(tmp_list);
1757
1758 /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1759 spin_lock_irq(&wa->xfer_list_lock);
1760 list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1761 wa->xfer_delayed_list.prev);
1762 spin_unlock_irq(&wa->xfer_list_lock);
1763
1764 /*
1765 * enqueue from temp list without list lock held since wa_urb_enqueue_b
1766 * can take xfer->lock as well as lock mutexes.
1767 */
1768 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1769 list_del_init(&xfer->list_node);
1770
1771 urb = xfer->urb;
1772 if (wa_urb_enqueue_b(xfer) < 0)
1773 wa_xfer_giveback(xfer);
1774 usb_put_urb(urb); /* taken when queuing */
1775 }
1776}
1777EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1778
1779/*
1780 * Process the errored transfers on the Wire Adapter outside of interrupt.
1781 */
1782void wa_process_errored_transfers_run(struct work_struct *ws)
1783{
1784 struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1785 struct wa_xfer *xfer, *next;
1786 LIST_HEAD(tmp_list);
1787
1788 pr_info("%s: Run delayed STALL processing.\n", __func__);
1789
1790 /* Create a copy of the wa->xfer_errored_list while holding the lock */
1791 spin_lock_irq(&wa->xfer_list_lock);
1792 list_cut_position(&tmp_list, &wa->xfer_errored_list,
1793 wa->xfer_errored_list.prev);
1794 spin_unlock_irq(&wa->xfer_list_lock);
1795
1796 /*
1797 * run rpipe_clear_feature_stalled from temp list without list lock
1798 * held.
1799 */
1800 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1801 struct usb_host_endpoint *ep;
1802 unsigned long flags;
1803 struct wa_rpipe *rpipe;
1804
1805 spin_lock_irqsave(&xfer->lock, flags);
1806 ep = xfer->ep;
1807 rpipe = ep->hcpriv;
1808 spin_unlock_irqrestore(&xfer->lock, flags);
1809
1810 /* clear RPIPE feature stalled without holding a lock. */
1811 rpipe_clear_feature_stalled(wa, ep);
1812
1813 /* complete the xfer. This removes it from the tmp list. */
1814 wa_xfer_completion(xfer);
1815
1816 /* check for work. */
1817 wa_xfer_delayed_run(rpipe);
1818 }
1819}
1820EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1821
1822/*
1823 * Submit a transfer to the Wire Adapter in a delayed way
1824 *
1825 * The process of enqueuing involves possible sleeps() [see
1826 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1827 * in an atomic section, we defer the enqueue_b() call--else we call direct.
1828 *
1829 * @urb: We own a reference to it done by the HCI Linux USB stack that
1830 * will be given up by calling usb_hcd_giveback_urb() or by
1831 * returning error from this function -> ergo we don't have to
1832 * refcount it.
1833 */
1834int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1835 struct urb *urb, gfp_t gfp)
1836{
1837 int result;
1838 struct device *dev = &wa->usb_iface->dev;
1839 struct wa_xfer *xfer;
1840 unsigned long my_flags;
1841 unsigned cant_sleep = irqs_disabled() | in_atomic();
1842
1843 if ((urb->transfer_buffer == NULL)
1844 && (urb->sg == NULL)
1845 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1846 && urb->transfer_buffer_length != 0) {
1847 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1848 dump_stack();
1849 }
1850
1851 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1852 result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1853 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1854 if (result < 0)
1855 goto error_link_urb;
1856
1857 result = -ENOMEM;
1858 xfer = kzalloc(sizeof(*xfer), gfp);
1859 if (xfer == NULL)
1860 goto error_kmalloc;
1861
1862 result = -ENOENT;
1863 if (urb->status != -EINPROGRESS) /* cancelled */
1864 goto error_dequeued; /* before starting? */
1865 wa_xfer_init(xfer);
1866 xfer->wa = wa_get(wa);
1867 xfer->urb = urb;
1868 xfer->gfp = gfp;
1869 xfer->ep = ep;
1870 urb->hcpriv = xfer;
1871
1872 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1873 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1874 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1875 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1876 cant_sleep ? "deferred" : "inline");
1877
1878 if (cant_sleep) {
1879 usb_get_urb(urb);
1880 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1881 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1882 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1883 queue_work(wusbd, &wa->xfer_enqueue_work);
1884 } else {
1885 result = wa_urb_enqueue_b(xfer);
1886 if (result < 0) {
1887 /*
1888 * URB submit/enqueue failed. Clean up, return an
1889 * error and do not run the callback. This avoids
1890 * an infinite submit/complete loop.
1891 */
1892 dev_err(dev, "%s: URB enqueue failed: %d\n",
1893 __func__, result);
1894 wa_put(xfer->wa);
1895 wa_xfer_put(xfer);
1896 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1897 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1898 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1899 return result;
1900 }
1901 }
1902 return 0;
1903
1904error_dequeued:
1905 kfree(xfer);
1906error_kmalloc:
1907 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1908 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1909 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1910error_link_urb:
1911 return result;
1912}
1913EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1914
1915/*
1916 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
1917 * handler] is called.
1918 *
1919 * Until a transfer goes successfully through wa_urb_enqueue() it
1920 * needs to be dequeued with completion calling; when stuck in delayed
1921 * or before wa_xfer_setup() is called, we need to do completion.
1922 *
1923 * not setup If there is no hcpriv yet, that means that that enqueue
1924 * still had no time to set the xfer up. Because
1925 * urb->status should be other than -EINPROGRESS,
1926 * enqueue() will catch that and bail out.
1927 *
1928 * If the transfer has gone through setup, we just need to clean it
1929 * up. If it has gone through submit(), we have to abort it [with an
1930 * asynch request] and then make sure we cancel each segment.
1931 *
1932 */
1933int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1934{
1935 unsigned long flags, flags2;
1936 struct wa_xfer *xfer;
1937 struct wa_seg *seg;
1938 struct wa_rpipe *rpipe;
1939 unsigned cnt, done = 0, xfer_abort_pending;
1940 unsigned rpipe_ready = 0;
1941 int result;
1942
1943 /* check if it is safe to unlink. */
1944 spin_lock_irqsave(&wa->xfer_list_lock, flags);
1945 result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1946 if ((result == 0) && urb->hcpriv) {
1947 /*
1948 * Get a xfer ref to prevent a race with wa_xfer_giveback
1949 * cleaning up the xfer while we are working with it.
1950 */
1951 wa_xfer_get(urb->hcpriv);
1952 }
1953 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1954 if (result)
1955 return result;
1956
1957 xfer = urb->hcpriv;
1958 if (xfer == NULL)
1959 return -ENOENT;
1960 spin_lock_irqsave(&xfer->lock, flags);
1961 pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1962 rpipe = xfer->ep->hcpriv;
1963 if (rpipe == NULL) {
1964 pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s",
1965 __func__, xfer, wa_xfer_id(xfer),
1966 "Probably already aborted.\n" );
1967 result = -ENOENT;
1968 goto out_unlock;
1969 }
1970 /*
1971 * Check for done to avoid racing with wa_xfer_giveback and completing
1972 * twice.
1973 */
1974 if (__wa_xfer_is_done(xfer)) {
1975 pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1976 xfer, wa_xfer_id(xfer));
1977 result = -ENOENT;
1978 goto out_unlock;
1979 }
1980 /* Check the delayed list -> if there, release and complete */
1981 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1982 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1983 goto dequeue_delayed;
1984 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1985 if (xfer->seg == NULL) /* still hasn't reached */
1986 goto out_unlock; /* setup(), enqueue_b() completes */
1987 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1988 xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1989 /*
1990 * grab the rpipe->seg_lock here to prevent racing with
1991 * __wa_xfer_delayed_run.
1992 */
1993 spin_lock(&rpipe->seg_lock);
1994 for (cnt = 0; cnt < xfer->segs; cnt++) {
1995 seg = xfer->seg[cnt];
1996 pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1997 __func__, wa_xfer_id(xfer), cnt, seg->status);
1998 switch (seg->status) {
1999 case WA_SEG_NOTREADY:
2000 case WA_SEG_READY:
2001 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
2002 xfer, cnt, seg->status);
2003 WARN_ON(1);
2004 break;
2005 case WA_SEG_DELAYED:
2006 /*
2007 * delete from rpipe delayed list. If no segments on
2008 * this xfer have been submitted, __wa_xfer_is_done will
2009 * trigger a giveback below. Otherwise, the submitted
2010 * segments will be completed in the DTI interrupt.
2011 */
2012 seg->status = WA_SEG_ABORTED;
2013 seg->result = -ENOENT;
2014 list_del(&seg->list_node);
2015 xfer->segs_done++;
2016 break;
2017 case WA_SEG_DONE:
2018 case WA_SEG_ERROR:
2019 case WA_SEG_ABORTED:
2020 break;
2021 /*
2022 * The buf_in data for a segment in the
2023 * WA_SEG_DTI_PENDING state is actively being read.
2024 * Let wa_buf_in_cb handle it since it will be called
2025 * and will increment xfer->segs_done. Cleaning up
2026 * here could cause wa_buf_in_cb to access the xfer
2027 * after it has been completed/freed.
2028 */
2029 case WA_SEG_DTI_PENDING:
2030 break;
2031 /*
2032 * In the states below, the HWA device already knows
2033 * about the transfer. If an abort request was sent,
2034 * allow the HWA to process it and wait for the
2035 * results. Otherwise, the DTI state and seg completed
2036 * counts can get out of sync.
2037 */
2038 case WA_SEG_SUBMITTED:
2039 case WA_SEG_PENDING:
2040 /*
2041 * Check if the abort was successfully sent. This could
2042 * be false if the HWA has been removed but we haven't
2043 * gotten the disconnect notification yet.
2044 */
2045 if (!xfer_abort_pending) {
2046 seg->status = WA_SEG_ABORTED;
2047 rpipe_ready = rpipe_avail_inc(rpipe);
2048 xfer->segs_done++;
2049 }
2050 break;
2051 }
2052 }
2053 spin_unlock(&rpipe->seg_lock);
2054 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
2055 done = __wa_xfer_is_done(xfer);
2056 spin_unlock_irqrestore(&xfer->lock, flags);
2057 if (done)
2058 wa_xfer_completion(xfer);
2059 if (rpipe_ready)
2060 wa_xfer_delayed_run(rpipe);
2061 wa_xfer_put(xfer);
2062 return result;
2063
2064out_unlock:
2065 spin_unlock_irqrestore(&xfer->lock, flags);
2066 wa_xfer_put(xfer);
2067 return result;
2068
2069dequeue_delayed:
2070 list_del_init(&xfer->list_node);
2071 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
2072 xfer->result = urb->status;
2073 spin_unlock_irqrestore(&xfer->lock, flags);
2074 wa_xfer_giveback(xfer);
2075 wa_xfer_put(xfer);
2076 usb_put_urb(urb); /* we got a ref in enqueue() */
2077 return 0;
2078}
2079EXPORT_SYMBOL_GPL(wa_urb_dequeue);
2080
2081/*
2082 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
2083 * codes
2084 *
2085 * Positive errno values are internal inconsistencies and should be
2086 * flagged louder. Negative are to be passed up to the user in the
2087 * normal way.
2088 *
2089 * @status: USB WA status code -- high two bits are stripped.
2090 */
2091static int wa_xfer_status_to_errno(u8 status)
2092{
2093 int errno;
2094 u8 real_status = status;
2095 static int xlat[] = {
2096 [WA_XFER_STATUS_SUCCESS] = 0,
2097 [WA_XFER_STATUS_HALTED] = -EPIPE,
2098 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
2099 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
2100 [WA_XFER_RESERVED] = EINVAL,
2101 [WA_XFER_STATUS_NOT_FOUND] = 0,
2102 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2103 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
2104 [WA_XFER_STATUS_ABORTED] = -ENOENT,
2105 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
2106 [WA_XFER_INVALID_FORMAT] = EINVAL,
2107 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
2108 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
2109 };
2110 status &= 0x3f;
2111
2112 if (status == 0)
2113 return 0;
2114 if (status >= ARRAY_SIZE(xlat)) {
2115 printk_ratelimited(KERN_ERR "%s(): BUG? "
2116 "Unknown WA transfer status 0x%02x\n",
2117 __func__, real_status);
2118 return -EINVAL;
2119 }
2120 errno = xlat[status];
2121 if (unlikely(errno > 0)) {
2122 printk_ratelimited(KERN_ERR "%s(): BUG? "
2123 "Inconsistent WA status: 0x%02x\n",
2124 __func__, real_status);
2125 errno = -errno;
2126 }
2127 return errno;
2128}
2129
2130/*
2131 * If a last segment flag and/or a transfer result error is encountered,
2132 * no other segment transfer results will be returned from the device.
2133 * Mark the remaining submitted or pending xfers as completed so that
2134 * the xfer will complete cleanly.
2135 *
2136 * xfer->lock must be held
2137 *
2138 */
2139static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2140 int starting_index, enum wa_seg_status status)
2141{
2142 int index;
2143 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2144
2145 for (index = starting_index; index < xfer->segs_submitted; index++) {
2146 struct wa_seg *current_seg = xfer->seg[index];
2147
2148 BUG_ON(current_seg == NULL);
2149
2150 switch (current_seg->status) {
2151 case WA_SEG_SUBMITTED:
2152 case WA_SEG_PENDING:
2153 case WA_SEG_DTI_PENDING:
2154 rpipe_avail_inc(rpipe);
2155 /*
2156 * do not increment RPIPE avail for the WA_SEG_DELAYED case
2157 * since it has not been submitted to the RPIPE.
2158 */
2159 case WA_SEG_DELAYED:
2160 xfer->segs_done++;
2161 current_seg->status = status;
2162 break;
2163 case WA_SEG_ABORTED:
2164 break;
2165 default:
2166 WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2167 __func__, wa_xfer_id(xfer), index,
2168 current_seg->status);
2169 break;
2170 }
2171 }
2172}
2173
2174/* Populate the given urb based on the current isoc transfer state. */
2175static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2176 struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2177{
2178 int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2179 int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2180 struct usb_iso_packet_descriptor *iso_frame_desc =
2181 xfer->urb->iso_frame_desc;
2182 const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2183 int next_frame_contiguous;
2184 struct usb_iso_packet_descriptor *iso_frame;
2185
2186 BUG_ON(buf_in_urb->status == -EINPROGRESS);
2187
2188 /*
2189 * If the current frame actual_length is contiguous with the next frame
2190 * and actual_length is a multiple of the DTI endpoint max packet size,
2191 * combine the current frame with the next frame in a single URB. This
2192 * reduces the number of URBs that must be submitted in that case.
2193 */
2194 seg_index = seg->isoc_frame_index;
2195 do {
2196 next_frame_contiguous = 0;
2197
2198 iso_frame = &iso_frame_desc[urb_frame_index];
2199 total_len += iso_frame->actual_length;
2200 ++urb_frame_index;
2201 ++seg_index;
2202
2203 if (seg_index < seg->isoc_frame_count) {
2204 struct usb_iso_packet_descriptor *next_iso_frame;
2205
2206 next_iso_frame = &iso_frame_desc[urb_frame_index];
2207
2208 if ((iso_frame->offset + iso_frame->actual_length) ==
2209 next_iso_frame->offset)
2210 next_frame_contiguous = 1;
2211 }
2212 } while (next_frame_contiguous
2213 && ((iso_frame->actual_length % dti_packet_size) == 0));
2214
2215 /* this should always be 0 before a resubmit. */
2216 buf_in_urb->num_mapped_sgs = 0;
2217 buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2218 iso_frame_desc[urb_start_frame].offset;
2219 buf_in_urb->transfer_buffer_length = total_len;
2220 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2221 buf_in_urb->transfer_buffer = NULL;
2222 buf_in_urb->sg = NULL;
2223 buf_in_urb->num_sgs = 0;
2224 buf_in_urb->context = seg;
2225
2226 /* return the number of frames included in this URB. */
2227 return seg_index - seg->isoc_frame_index;
2228}
2229
2230/* Populate the given urb based on the current transfer state. */
2231static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2232 unsigned int seg_idx, unsigned int bytes_transferred)
2233{
2234 int result = 0;
2235 struct wa_seg *seg = xfer->seg[seg_idx];
2236
2237 BUG_ON(buf_in_urb->status == -EINPROGRESS);
2238 /* this should always be 0 before a resubmit. */
2239 buf_in_urb->num_mapped_sgs = 0;
2240
2241 if (xfer->is_dma) {
2242 buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2243 + (seg_idx * xfer->seg_size);
2244 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2245 buf_in_urb->transfer_buffer = NULL;
2246 buf_in_urb->sg = NULL;
2247 buf_in_urb->num_sgs = 0;
2248 } else {
2249 /* do buffer or SG processing. */
2250 buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2251
2252 if (xfer->urb->transfer_buffer) {
2253 buf_in_urb->transfer_buffer =
2254 xfer->urb->transfer_buffer
2255 + (seg_idx * xfer->seg_size);
2256 buf_in_urb->sg = NULL;
2257 buf_in_urb->num_sgs = 0;
2258 } else {
2259 /* allocate an SG list to store seg_size bytes
2260 and copy the subset of the xfer->urb->sg
2261 that matches the buffer subset we are
2262 about to read. */
2263 buf_in_urb->sg = wa_xfer_create_subset_sg(
2264 xfer->urb->sg,
2265 seg_idx * xfer->seg_size,
2266 bytes_transferred,
2267 &(buf_in_urb->num_sgs));
2268
2269 if (!(buf_in_urb->sg)) {
2270 buf_in_urb->num_sgs = 0;
2271 result = -ENOMEM;
2272 }
2273 buf_in_urb->transfer_buffer = NULL;
2274 }
2275 }
2276 buf_in_urb->transfer_buffer_length = bytes_transferred;
2277 buf_in_urb->context = seg;
2278
2279 return result;
2280}
2281
2282/*
2283 * Process a xfer result completion message
2284 *
2285 * inbound transfers: need to schedule a buf_in_urb read
2286 *
2287 * FIXME: this function needs to be broken up in parts
2288 */
2289static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2290 struct wa_xfer_result *xfer_result)
2291{
2292 int result;
2293 struct device *dev = &wa->usb_iface->dev;
2294 unsigned long flags;
2295 unsigned int seg_idx;
2296 struct wa_seg *seg;
2297 struct wa_rpipe *rpipe;
2298 unsigned done = 0;
2299 u8 usb_status;
2300 unsigned rpipe_ready = 0;
2301 unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2302 struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2303
2304 spin_lock_irqsave(&xfer->lock, flags);
2305 seg_idx = xfer_result->bTransferSegment & 0x7f;
2306 if (unlikely(seg_idx >= xfer->segs))
2307 goto error_bad_seg;
2308 seg = xfer->seg[seg_idx];
2309 rpipe = xfer->ep->hcpriv;
2310 usb_status = xfer_result->bTransferStatus;
2311 dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2312 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2313 if (seg->status == WA_SEG_ABORTED
2314 || seg->status == WA_SEG_ERROR) /* already handled */
2315 goto segment_aborted;
2316 if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */
2317 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
2318 if (seg->status != WA_SEG_PENDING) {
2319 if (printk_ratelimit())
2320 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2321 xfer, seg_idx, seg->status);
2322 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
2323 }
2324 if (usb_status & 0x80) {
2325 seg->result = wa_xfer_status_to_errno(usb_status);
2326 dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2327 xfer, xfer->id, seg->index, usb_status);
2328 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2329 WA_SEG_ABORTED : WA_SEG_ERROR;
2330 goto error_complete;
2331 }
2332 /* FIXME: we ignore warnings, tally them for stats */
2333 if (usb_status & 0x40) /* Warning?... */
2334 usb_status = 0; /* ... pass */
2335 /*
2336 * If the last segment bit is set, complete the remaining segments.
2337 * When the current segment is completed, either in wa_buf_in_cb for
2338 * transfers with data or below for no data, the xfer will complete.
2339 */
2340 if (xfer_result->bTransferSegment & 0x80)
2341 wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2342 WA_SEG_DONE);
2343 if (usb_pipeisoc(xfer->urb->pipe)
2344 && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2345 /* set up WA state to read the isoc packet status next. */
2346 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2347 wa->dti_isoc_xfer_seg = seg_idx;
2348 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2349 } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2350 && (bytes_transferred > 0)) {
2351 /* IN data phase: read to buffer */
2352 seg->status = WA_SEG_DTI_PENDING;
2353 result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2354 bytes_transferred);
2355 if (result < 0)
2356 goto error_buf_in_populate;
2357 ++(wa->active_buf_in_urbs);
2358 result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2359 if (result < 0) {
2360 --(wa->active_buf_in_urbs);
2361 goto error_submit_buf_in;
2362 }
2363 } else {
2364 /* OUT data phase or no data, complete it -- */
2365 seg->result = bytes_transferred;
2366 rpipe_ready = rpipe_avail_inc(rpipe);
2367 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2368 }
2369 spin_unlock_irqrestore(&xfer->lock, flags);
2370 if (done)
2371 wa_xfer_completion(xfer);
2372 if (rpipe_ready)
2373 wa_xfer_delayed_run(rpipe);
2374 return;
2375
2376error_submit_buf_in:
2377 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2378 dev_err(dev, "DTI: URB max acceptable errors "
2379 "exceeded, resetting device\n");
2380 wa_reset_all(wa);
2381 }
2382 if (printk_ratelimit())
2383 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2384 xfer, seg_idx, result);
2385 seg->result = result;
2386 kfree(buf_in_urb->sg);
2387 buf_in_urb->sg = NULL;
2388error_buf_in_populate:
2389 __wa_xfer_abort(xfer);
2390 seg->status = WA_SEG_ERROR;
2391error_complete:
2392 xfer->segs_done++;
2393 rpipe_ready = rpipe_avail_inc(rpipe);
2394 wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2395 done = __wa_xfer_is_done(xfer);
2396 /*
2397 * queue work item to clear STALL for control endpoints.
2398 * Otherwise, let endpoint_reset take care of it.
2399 */
2400 if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2401 usb_endpoint_xfer_control(&xfer->ep->desc) &&
2402 done) {
2403
2404 dev_info(dev, "Control EP stall. Queue delayed work.\n");
2405 spin_lock(&wa->xfer_list_lock);
2406 /* move xfer from xfer_list to xfer_errored_list. */
2407 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2408 spin_unlock(&wa->xfer_list_lock);
2409 spin_unlock_irqrestore(&xfer->lock, flags);
2410 queue_work(wusbd, &wa->xfer_error_work);
2411 } else {
2412 spin_unlock_irqrestore(&xfer->lock, flags);
2413 if (done)
2414 wa_xfer_completion(xfer);
2415 if (rpipe_ready)
2416 wa_xfer_delayed_run(rpipe);
2417 }
2418
2419 return;
2420
2421error_bad_seg:
2422 spin_unlock_irqrestore(&xfer->lock, flags);
2423 wa_urb_dequeue(wa, xfer->urb, -ENOENT);
2424 if (printk_ratelimit())
2425 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2426 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2427 dev_err(dev, "DTI: URB max acceptable errors "
2428 "exceeded, resetting device\n");
2429 wa_reset_all(wa);
2430 }
2431 return;
2432
2433segment_aborted:
2434 /* nothing to do, as the aborter did the completion */
2435 spin_unlock_irqrestore(&xfer->lock, flags);
2436}
2437
2438/*
2439 * Process a isochronous packet status message
2440 *
2441 * inbound transfers: need to schedule a buf_in_urb read
2442 */
2443static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2444{
2445 struct device *dev = &wa->usb_iface->dev;
2446 struct wa_xfer_packet_status_hwaiso *packet_status;
2447 struct wa_xfer_packet_status_len_hwaiso *status_array;
2448 struct wa_xfer *xfer;
2449 unsigned long flags;
2450 struct wa_seg *seg;
2451 struct wa_rpipe *rpipe;
2452 unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
2453 unsigned first_frame_index = 0, rpipe_ready = 0;
2454 int expected_size;
2455
2456 /* We have a xfer result buffer; check it */
2457 dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2458 urb->actual_length, urb->transfer_buffer);
2459 packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2460 if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2461 dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2462 packet_status->bPacketType);
2463 goto error_parse_buffer;
2464 }
2465 xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2466 if (xfer == NULL) {
2467 dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2468 wa->dti_isoc_xfer_in_progress);
2469 goto error_parse_buffer;
2470 }
2471 spin_lock_irqsave(&xfer->lock, flags);
2472 if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2473 goto error_bad_seg;
2474 seg = xfer->seg[wa->dti_isoc_xfer_seg];
2475 rpipe = xfer->ep->hcpriv;
2476 expected_size = sizeof(*packet_status) +
2477 (sizeof(packet_status->PacketStatus[0]) *
2478 seg->isoc_frame_count);
2479 if (urb->actual_length != expected_size) {
2480 dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
2481 urb->actual_length, expected_size);
2482 goto error_bad_seg;
2483 }
2484 if (le16_to_cpu(packet_status->wLength) != expected_size) {
2485 dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2486 le16_to_cpu(packet_status->wLength));
2487 goto error_bad_seg;
2488 }
2489 /* write isoc packet status and lengths back to the xfer urb. */
2490 status_array = packet_status->PacketStatus;
2491 xfer->urb->start_frame =
2492 wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
2493 for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2494 struct usb_iso_packet_descriptor *iso_frame_desc =
2495 xfer->urb->iso_frame_desc;
2496 const int xfer_frame_index =
2497 seg->isoc_frame_offset + seg_index;
2498
2499 iso_frame_desc[xfer_frame_index].status =
2500 wa_xfer_status_to_errno(
2501 le16_to_cpu(status_array[seg_index].PacketStatus));
2502 iso_frame_desc[xfer_frame_index].actual_length =
2503 le16_to_cpu(status_array[seg_index].PacketLength);
2504 /* track the number of frames successfully transferred. */
2505 if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
2506 /* save the starting frame index for buf_in_urb. */
2507 if (!data_frame_count)
2508 first_frame_index = seg_index;
2509 ++data_frame_count;
2510 }
2511 }
2512
2513 if (xfer->is_inbound && data_frame_count) {
2514 int result, total_frames_read = 0, urb_index = 0;
2515 struct urb *buf_in_urb;
2516
2517 /* IN data phase: read to buffer */
2518 seg->status = WA_SEG_DTI_PENDING;
2519
2520 /* start with the first frame with data. */
2521 seg->isoc_frame_index = first_frame_index;
2522 /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
2523 do {
2524 int urb_frame_index, urb_frame_count;
2525 struct usb_iso_packet_descriptor *iso_frame_desc;
2526
2527 buf_in_urb = &(wa->buf_in_urbs[urb_index]);
2528 urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
2529 buf_in_urb, xfer, seg);
2530 /* advance frame index to start of next read URB. */
2531 seg->isoc_frame_index += urb_frame_count;
2532 total_frames_read += urb_frame_count;
2533
2534 ++(wa->active_buf_in_urbs);
2535 result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2536
2537 /* skip 0-byte frames. */
2538 urb_frame_index =
2539 seg->isoc_frame_offset + seg->isoc_frame_index;
2540 iso_frame_desc =
2541 &(xfer->urb->iso_frame_desc[urb_frame_index]);
2542 while ((seg->isoc_frame_index <
2543 seg->isoc_frame_count) &&
2544 (iso_frame_desc->actual_length == 0)) {
2545 ++(seg->isoc_frame_index);
2546 ++iso_frame_desc;
2547 }
2548 ++urb_index;
2549
2550 } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
2551 && (seg->isoc_frame_index <
2552 seg->isoc_frame_count));
2553
2554 if (result < 0) {
2555 --(wa->active_buf_in_urbs);
2556 dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2557 result);
2558 wa_reset_all(wa);
2559 } else if (data_frame_count > total_frames_read)
2560 /* If we need to read more frames, set DTI busy. */
2561 dti_busy = 1;
2562 } else {
2563 /* OUT transfer or no more IN data, complete it -- */
2564 rpipe_ready = rpipe_avail_inc(rpipe);
2565 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2566 }
2567 spin_unlock_irqrestore(&xfer->lock, flags);
2568 if (dti_busy)
2569 wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
2570 else
2571 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2572 if (done)
2573 wa_xfer_completion(xfer);
2574 if (rpipe_ready)
2575 wa_xfer_delayed_run(rpipe);
2576 wa_xfer_put(xfer);
2577 return dti_busy;
2578
2579error_bad_seg:
2580 spin_unlock_irqrestore(&xfer->lock, flags);
2581 wa_xfer_put(xfer);
2582error_parse_buffer:
2583 return dti_busy;
2584}
2585
2586/*
2587 * Callback for the IN data phase
2588 *
2589 * If successful transition state; otherwise, take a note of the
2590 * error, mark this segment done and try completion.
2591 *
2592 * Note we don't access until we are sure that the transfer hasn't
2593 * been cancelled (ECONNRESET, ENOENT), which could mean that
2594 * seg->xfer could be already gone.
2595 */
2596static void wa_buf_in_cb(struct urb *urb)
2597{
2598 struct wa_seg *seg = urb->context;
2599 struct wa_xfer *xfer = seg->xfer;
2600 struct wahc *wa;
2601 struct device *dev;
2602 struct wa_rpipe *rpipe;
2603 unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
2604 unsigned long flags;
2605 int resubmit_dti = 0, active_buf_in_urbs;
2606 u8 done = 0;
2607
2608 /* free the sg if it was used. */
2609 kfree(urb->sg);
2610 urb->sg = NULL;
2611
2612 spin_lock_irqsave(&xfer->lock, flags);
2613 wa = xfer->wa;
2614 dev = &wa->usb_iface->dev;
2615 --(wa->active_buf_in_urbs);
2616 active_buf_in_urbs = wa->active_buf_in_urbs;
2617 rpipe = xfer->ep->hcpriv;
2618
2619 if (usb_pipeisoc(xfer->urb->pipe)) {
2620 struct usb_iso_packet_descriptor *iso_frame_desc =
2621 xfer->urb->iso_frame_desc;
2622 int seg_index;
2623
2624 /*
2625 * Find the next isoc frame with data and count how many
2626 * frames with data remain.
2627 */
2628 seg_index = seg->isoc_frame_index;
2629 while (seg_index < seg->isoc_frame_count) {
2630 const int urb_frame_index =
2631 seg->isoc_frame_offset + seg_index;
2632
2633 if (iso_frame_desc[urb_frame_index].actual_length > 0) {
2634 /* save the index of the next frame with data */
2635 if (!isoc_data_frame_count)
2636 seg->isoc_frame_index = seg_index;
2637 ++isoc_data_frame_count;
2638 }
2639 ++seg_index;
2640 }
2641 }
2642 spin_unlock_irqrestore(&xfer->lock, flags);
2643
2644 switch (urb->status) {
2645 case 0:
2646 spin_lock_irqsave(&xfer->lock, flags);
2647
2648 seg->result += urb->actual_length;
2649 if (isoc_data_frame_count > 0) {
2650 int result, urb_frame_count;
2651
2652 /* submit a read URB for the next frame with data. */
2653 urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
2654 xfer, seg);
2655 /* advance index to start of next read URB. */
2656 seg->isoc_frame_index += urb_frame_count;
2657 ++(wa->active_buf_in_urbs);
2658 result = usb_submit_urb(urb, GFP_ATOMIC);
2659 if (result < 0) {
2660 --(wa->active_buf_in_urbs);
2661 dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2662 result);
2663 wa_reset_all(wa);
2664 }
2665 /*
2666 * If we are in this callback and
2667 * isoc_data_frame_count > 0, it means that the dti_urb
2668 * submission was delayed in wa_dti_cb. Once
2669 * we submit the last buf_in_urb, we can submit the
2670 * delayed dti_urb.
2671 */
2672 resubmit_dti = (isoc_data_frame_count ==
2673 urb_frame_count);
2674 } else if (active_buf_in_urbs == 0) {
2675 dev_dbg(dev,
2676 "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
2677 xfer, wa_xfer_id(xfer), seg->index,
2678 seg->result);
2679 rpipe_ready = rpipe_avail_inc(rpipe);
2680 done = __wa_xfer_mark_seg_as_done(xfer, seg,
2681 WA_SEG_DONE);
2682 }
2683 spin_unlock_irqrestore(&xfer->lock, flags);
2684 if (done)
2685 wa_xfer_completion(xfer);
2686 if (rpipe_ready)
2687 wa_xfer_delayed_run(rpipe);
2688 break;
2689 case -ECONNRESET: /* URB unlinked; no need to do anything */
2690 case -ENOENT: /* as it was done by the who unlinked us */
2691 break;
2692 default: /* Other errors ... */
2693 /*
2694 * Error on data buf read. Only resubmit DTI if it hasn't
2695 * already been done by previously hitting this error or by a
2696 * successful completion of the previous buf_in_urb.
2697 */
2698 resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
2699 spin_lock_irqsave(&xfer->lock, flags);
2700 if (printk_ratelimit())
2701 dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
2702 xfer, wa_xfer_id(xfer), seg->index,
2703 urb->status);
2704 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2705 EDC_ERROR_TIMEFRAME)){
2706 dev_err(dev, "DTO: URB max acceptable errors "
2707 "exceeded, resetting device\n");
2708 wa_reset_all(wa);
2709 }
2710 seg->result = urb->status;
2711 rpipe_ready = rpipe_avail_inc(rpipe);
2712 if (active_buf_in_urbs == 0)
2713 done = __wa_xfer_mark_seg_as_done(xfer, seg,
2714 WA_SEG_ERROR);
2715 else
2716 __wa_xfer_abort(xfer);
2717 spin_unlock_irqrestore(&xfer->lock, flags);
2718 if (done)
2719 wa_xfer_completion(xfer);
2720 if (rpipe_ready)
2721 wa_xfer_delayed_run(rpipe);
2722 }
2723
2724 if (resubmit_dti) {
2725 int result;
2726
2727 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2728
2729 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2730 if (result < 0) {
2731 dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2732 result);
2733 wa_reset_all(wa);
2734 }
2735 }
2736}
2737
2738/*
2739 * Handle an incoming transfer result buffer
2740 *
2741 * Given a transfer result buffer, it completes the transfer (possibly
2742 * scheduling and buffer in read) and then resubmits the DTI URB for a
2743 * new transfer result read.
2744 *
2745 *
2746 * The xfer_result DTI URB state machine
2747 *
2748 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2749 *
2750 * We start in OFF mode, the first xfer_result notification [through
2751 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2752 * read.
2753 *
2754 * We receive a buffer -- if it is not a xfer_result, we complain and
2755 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
2756 * request accounting. If it is an IN segment, we move to RBI and post
2757 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
2758 * repost the DTI-URB and move to RXR state. if there was no IN
2759 * segment, it will repost the DTI-URB.
2760 *
2761 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
2762 * errors) in the URBs.
2763 */
2764static void wa_dti_cb(struct urb *urb)
2765{
2766 int result, dti_busy = 0;
2767 struct wahc *wa = urb->context;
2768 struct device *dev = &wa->usb_iface->dev;
2769 u32 xfer_id;
2770 u8 usb_status;
2771
2772 BUG_ON(wa->dti_urb != urb);
2773 switch (wa->dti_urb->status) {
2774 case 0:
2775 if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2776 struct wa_xfer_result *xfer_result;
2777 struct wa_xfer *xfer;
2778
2779 /* We have a xfer result buffer; check it */
2780 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2781 urb->actual_length, urb->transfer_buffer);
2782 if (urb->actual_length != sizeof(*xfer_result)) {
2783 dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2784 urb->actual_length,
2785 sizeof(*xfer_result));
2786 break;
2787 }
2788 xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2789 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2790 dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2791 xfer_result->hdr.bLength);
2792 break;
2793 }
2794 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2795 dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2796 xfer_result->hdr.bNotifyType);
2797 break;
2798 }
2799 xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2800 usb_status = xfer_result->bTransferStatus & 0x3f;
2801 if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
2802 /* taken care of already */
2803 dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
2804 __func__, xfer_id,
2805 xfer_result->bTransferSegment & 0x7f);
2806 break;
2807 }
2808 xfer = wa_xfer_get_by_id(wa, xfer_id);
2809 if (xfer == NULL) {
2810 /* FIXME: transaction not found. */
2811 dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2812 xfer_id, usb_status);
2813 break;
2814 }
2815 wa_xfer_result_chew(wa, xfer, xfer_result);
2816 wa_xfer_put(xfer);
2817 } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2818 dti_busy = wa_process_iso_packet_status(wa, urb);
2819 } else {
2820 dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2821 wa->dti_state);
2822 }
2823 break;
2824 case -ENOENT: /* (we killed the URB)...so, no broadcast */
2825 case -ESHUTDOWN: /* going away! */
2826 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2827 goto out;
2828 default:
2829 /* Unknown error */
2830 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2831 EDC_ERROR_TIMEFRAME)) {
2832 dev_err(dev, "DTI: URB max acceptable errors "
2833 "exceeded, resetting device\n");
2834 wa_reset_all(wa);
2835 goto out;
2836 }
2837 if (printk_ratelimit())
2838 dev_err(dev, "DTI: URB error %d\n", urb->status);
2839 break;
2840 }
2841
2842 /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
2843 if (!dti_busy) {
2844 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2845 if (result < 0) {
2846 dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2847 result);
2848 wa_reset_all(wa);
2849 }
2850 }
2851out:
2852 return;
2853}
2854
2855/*
2856 * Initialize the DTI URB for reading transfer result notifications and also
2857 * the buffer-in URB, for reading buffers. Then we just submit the DTI URB.
2858 */
2859int wa_dti_start(struct wahc *wa)
2860{
2861 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2862 struct device *dev = &wa->usb_iface->dev;
2863 int result = -ENOMEM, index;
2864
2865 if (wa->dti_urb != NULL) /* DTI URB already started */
2866 goto out;
2867
2868 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2869 if (wa->dti_urb == NULL)
2870 goto error_dti_urb_alloc;
2871 usb_fill_bulk_urb(
2872 wa->dti_urb, wa->usb_dev,
2873 usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
2874 wa->dti_buf, wa->dti_buf_size,
2875 wa_dti_cb, wa);
2876
2877 /* init the buf in URBs */
2878 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
2879 usb_fill_bulk_urb(
2880 &(wa->buf_in_urbs[index]), wa->usb_dev,
2881 usb_rcvbulkpipe(wa->usb_dev,
2882 0x80 | dti_epd->bEndpointAddress),
2883 NULL, 0, wa_buf_in_cb, wa);
2884 }
2885 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2886 if (result < 0) {
2887 dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
2888 result);
2889 goto error_dti_urb_submit;
2890 }
2891out:
2892 return 0;
2893
2894error_dti_urb_submit:
2895 usb_put_urb(wa->dti_urb);
2896 wa->dti_urb = NULL;
2897error_dti_urb_alloc:
2898 return result;
2899}
2900EXPORT_SYMBOL_GPL(wa_dti_start);
2901/*
2902 * Transfer complete notification
2903 *
2904 * Called from the notif.c code. We get a notification on EP2 saying
2905 * that some endpoint has some transfer result data available. We are
2906 * about to read it.
2907 *
2908 * To speed up things, we always have a URB reading the DTI URB; we
2909 * don't really set it up and start it until the first xfer complete
2910 * notification arrives, which is what we do here.
2911 *
2912 * Follow up in wa_dti_cb(), as that's where the whole state
2913 * machine starts.
2914 *
2915 * @wa shall be referenced
2916 */
2917void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2918{
2919 struct device *dev = &wa->usb_iface->dev;
2920 struct wa_notif_xfer *notif_xfer;
2921 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2922
2923 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2924 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2925
2926 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2927 /* FIXME: hardcoded limitation, adapt */
2928 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2929 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2930 goto error;
2931 }
2932
2933 /* attempt to start the DTI ep processing. */
2934 if (wa_dti_start(wa) < 0)
2935 goto error;
2936
2937 return;
2938
2939error:
2940 wa_reset_all(wa);
2941}