/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * How transfers work: get a buffer, break it up into segments (the
 * segment size is a multiple of the maxpacket size). For each segment
 * issue a segment request (struct wa_xfer_*), then send the data
 * buffer if out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, it is a pain to implement.
 *
 *
 * ENTRY POINTS
 *
 *   FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   Warned you are; it's my second try and still not happy with it.
 *
 * NOTES:
 *
 *   - No iso
 *
 *   - Supports DMA xfers, control, bulk and maybe interrupt
 *
 *   - Does not recycle unused rpipes
 *
 *     An rpipe is assigned to an endpoint the first time it is used,
 *     and then it's there, assigned, until the endpoint is disabled
 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *     (should be a mutex).
 *
 *     Two ways it could be done:
 *
 *     (a) set up a timer every time an rpipe's use count drops to 1
 *         (which means unused) or when a transfer ends. Reset the
 *         timer when a xfer is queued. If the timer expires, release
 *         the rpipe [see rpipe_ep_disable()].
 *
 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
 *         and none are found, go over the list, check their endpoint
 *         and their activity record; if there was no last-xfer-done
 *         timestamp in the last x seconds, take it.
 *
 *     However, because we have a limited set of resources
 *     (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
 *     blocks-per-rpipe, rpipes-per-host), in the end we are going to
 *     have to rebuild all this based on a scheduler, where we have a
 *     list of transactions to do and, based on the availability of the
 *     different required components (blocks, rpipes, segment slots,
 *     etc.), we schedule them. Painful.
 */
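
/*
 * A quick illustration (not driver code) of the buffer split-up
 * described above: the segment size is rounded down to a multiple of
 * the endpoint's maxpacket size and the buffer then needs
 * ceil(len / seg_size) segment requests. Kept under #if 0 so it never
 * builds; the function name is made up for the example.
 */
#if 0
static size_t example_seg_count(size_t buf_len, size_t seg_size,
				size_t maxpktsize)
{
	seg_size -= seg_size % maxpktsize;	/* multiple of maxpacket */
	return (buf_len + seg_size - 1) / seg_size;	/* round up */
}
#endif
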
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
	WA_SEGS_MAX = 255,
};

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb urb;
	struct urb *dto_urb;		/* for data output? */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
};
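
/*
 * A minimal sketch (illustrative, not used by the driver) of why the
 * layout above works: because 'urb' is the first member, the URB's
 * kref governs the whole segment and usb_put_urb(&seg->urb) on the
 * last reference frees the containing wa_seg in one go.
 */
#if 0
static struct wa_seg *example_seg_of_urb(struct urb *urb)
{
	return container_of(urb, struct wa_seg, urb);
}
#endif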

static void wa_seg_init(struct wa_seg *seg)
{
	/* usb_init_urb() repeats a lot of work, so we do it here */
	kref_init(&seg->urb.kref);
}

/*
 * Protected by xfer->lock
 *
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			/* dto_urb only exists for outbound transfers;
			 * usb_put_urb(NULL) is a safe no-op */
			usb_put_urb(xfer->seg[cnt]->dto_urb);
			usb_put_urb(&xfer->seg[cnt]->urb);
		}
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
				urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA....wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/*
 * Return the ID associated with xfer
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/*
 * Search for a transfer with the given ID on the wire adapter's
 * transfer list and take a reference on it.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}
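
/*
 * A minimal usage sketch (illustrative only) of the lookup/refcount
 * pattern above: the reference taken by wa_xfer_get_by_id() must be
 * dropped with wa_xfer_put() when the caller is done, exactly as
 * wa_xfer_result_cb() does further down.
 */
#if 0
static void example_process_result(struct wahc *wa, u32 xfer_id)
{
	struct wa_xfer *xfer = wa_xfer_get_by_id(wa, xfer_id);
	if (xfer == NULL)
		return;		/* unknown or already completed */
	/* ... act on the transfer ... */
	wa_xfer_put(xfer);
}
#endif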

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but free up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;
}

/*
 * Compute the segment size and type of the transfer request
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
	 * a check (FIXME) */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
		/ xfer->seg_size;
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	return result;
}

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
		/* fall through */
	default:
		BUG();
	}
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kzalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
				  usb_sndbulkpipe(usb_dev,
						  dto_epd->bEndpointAddress),
				  &seg->xfer_hdr, xfer_hdr_size,
				  wa_seg_cb, seg);
		buf_itr_size = buf_size > xfer->seg_size ?
			xfer->seg_size : buf_size;
		if (xfer->is_inbound == 0 && buf_size > 0) {
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
			if (xfer->is_dma) {
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
			} else
				seg->dto_urb->transfer_buffer =
					xfer->urb->transfer_buffer + buf_itr;
			seg->dto_urb->transfer_buffer_length = buf_itr_size;
		}
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;
	}
	return 0;

error_dto_alloc:
	kfree(xfer->seg[cnt]);
error_seg_kzalloc:
	/* use the fact that cnt is left at where it failed; the entry
	 * at cnt itself was freed above or never got allocated */
	while (cnt-- > 0) {
		if (xfer->is_inbound == 0)
			usb_put_urb(xfer->seg[cnt]->dto_urb);
		kfree(xfer->seg[cnt]);
	}
	kfree(xfer->seg);
	xfer->seg = NULL;
error_segs_kzalloc:
	return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer into a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
		cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}

/*
 * Submit a segment's request URB and, if there is one, its DTO data
 * phase URB
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}

/*
 * Execute more queued request segments until the maximum concurrent
 * allowed is reached
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock is normally taken outside the seg_lock, not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	      && !list_empty(&rpipe->seg_list)) {
		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}

/*
 * Submit as many segments of a transfer as the rpipe allows, queueing
 * the rest as delayed
 *
 * xfer->lock is taken
 *
 * On submit failure we just stop submitting and return the error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}

/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *		delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	return;

	/* this is basically wa_xfer_completion() broken up:
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and clean up / undo __wa_xfer_setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	return;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle.  That's why we do the whole thing under the
 * wa->xfer_list_lock.  If dequeue() jumps in, it first locks urb->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;

	spin_lock_irq(&wa->xfer_list_lock);
	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
				 list_node) {
		list_del_init(&xfer->list_node);
		spin_unlock_irq(&wa->xfer_list_lock);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */

		spin_lock_irq(&wa->xfer_list_lock);
	}
	spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call to a workqueue;
 * otherwise we call it directly.
 *
 * @urb: We own a reference to it taken by the Linux USB HCD stack
 *       that will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if (urb->transfer_buffer == NULL
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);

/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 *
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/* Nothing setup yet; enqueue() will see urb->status !=
		 * -EINPROGRESS (set by the hcd layer) and bail out with
		 * an error, no need to do completion
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Unknown WA transfer status 0x%02x\n",
			__func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Inconsistent WA status: 0x%02x\n",
			__func__, real_status);
		errno = -errno;
	}
	return errno;
}
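
/*
 * A worked example (illustrative only) of the translation above: a
 * raw bTransferStatus with the error flag in bit 7 set, e.g.
 * 0x80 | WA_XFER_STATUS_HALTED, has the two high bits stripped by the
 * 0x3f mask and then maps through xlat[] to -EPIPE.
 */
#if 0
static void example_status_translation(void)
{
	u8 raw = 0x80 | WA_XFER_STATUS_HALTED;
	int err = wa_xfer_status_to_errno(raw);	/* yields -EPIPE */
	(void)err;
}
#endif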

/*
 * Process an xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
			xfer, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
		} else {
			wa->buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	__wa_xfer_abort(xfer);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}

/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not an xfer_result, we complain and
 * repost the DTI-URB. If it is an xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have an xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_ABORTED
		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
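
/*
 * The DTI state machine described above wa_xfer_result_cb() restated
 * as code, purely for illustration; the driver keeps no explicit
 * state variable, the state is implied by which URB (DTI or BUF-IN)
 * is currently posted. The names here are made up.
 */
#if 0
enum example_dti_state {
	DTI_OFF,	/* no DTI URB posted yet */
	DTI_RXR,	/* DTI URB posted, reading xfer results */
	DTI_RBI,	/* BUF-IN URB posted, reading IN segment data */
};
#endif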

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always keep a URB posted reading on the DTI
 * endpoint; we don't really set it up and start it until the first
 * xfer complete notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}
1/*
2 * WUSB Wire Adapter
3 * Data transfer and URB enqueing
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * How transfers work: get a buffer, break it up in segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
27 *
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
32 *
33 * Sounds simple, it is a pain to implement.
34 *
35 *
36 * ENTRY POINTS
37 *
38 * FIXME
39 *
40 * LIFE CYCLE / STATE DIAGRAM
41 *
42 * FIXME
43 *
44 * THIS CODE IS DISGUSTING
45 *
46 * Warned you are; it's my second try and still not happy with it.
47 *
48 * NOTES:
49 *
50 * - No iso
51 *
52 * - Supports DMA xfers, control, bulk and maybe interrupt
53 *
54 * - Does not recycle unused rpipes
55 *
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60 * (should be a mutex).
61 *
62 * Two methods it could be done:
63 *
64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when a xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()].
68 *
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds) take it
73 *
74 * However, due to the fact that we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end
77 * we are going to have to rebuild all this based on an scheduler,
78 * to where we have a list of transactions to do and based on the
79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
81 */
82#include <linux/init.h>
83#include <linux/spinlock.h>
84#include <linux/slab.h>
85#include <linux/hash.h>
86#include <linux/ratelimit.h>
87#include <linux/export.h>
88
89#include "wa-hc.h"
90#include "wusbhc.h"
91
92enum {
93 WA_SEGS_MAX = 255,
94};
95
96enum wa_seg_status {
97 WA_SEG_NOTREADY,
98 WA_SEG_READY,
99 WA_SEG_DELAYED,
100 WA_SEG_SUBMITTED,
101 WA_SEG_PENDING,
102 WA_SEG_DTI_PENDING,
103 WA_SEG_DONE,
104 WA_SEG_ERROR,
105 WA_SEG_ABORTED,
106};
107
108static void wa_xfer_delayed_run(struct wa_rpipe *);
109
110/*
111 * Life cycle governed by 'struct urb' (the refcount of the struct is
112 * that of the 'struct urb' and usb_free_urb() would free the whole
113 * struct).
114 */
115struct wa_seg {
116 struct urb urb;
117 struct urb *dto_urb; /* for data output? */
118 struct list_head list_node; /* for rpipe->req_list */
119 struct wa_xfer *xfer; /* out xfer */
120 u8 index; /* which segment we are */
121 enum wa_seg_status status;
122 ssize_t result; /* bytes xfered or error */
123 struct wa_xfer_hdr xfer_hdr;
124 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
125};
126
127static void wa_seg_init(struct wa_seg *seg)
128{
129 /* usb_init_urb() repeats a lot of work, so we do it here */
130 kref_init(&seg->urb.kref);
131}
132
133/*
134 * Protected by xfer->lock
135 *
136 */
137struct wa_xfer {
138 struct kref refcnt;
139 struct list_head list_node;
140 spinlock_t lock;
141 u32 id;
142
143 struct wahc *wa; /* Wire adapter we are plugged to */
144 struct usb_host_endpoint *ep;
145 struct urb *urb; /* URB we are transferring for */
146 struct wa_seg **seg; /* transfer segments */
147 u8 segs, segs_submitted, segs_done;
148 unsigned is_inbound:1;
149 unsigned is_dma:1;
150 size_t seg_size;
151 int result;
152
153 gfp_t gfp; /* allocation mask */
154
155 struct wusb_dev *wusb_dev; /* for activity timestamps */
156};
157
158static inline void wa_xfer_init(struct wa_xfer *xfer)
159{
160 kref_init(&xfer->refcnt);
161 INIT_LIST_HEAD(&xfer->list_node);
162 spin_lock_init(&xfer->lock);
163}
164
165/*
166 * Destroy a transfer structure
167 *
168 * Note that the xfer->seg[index] thingies follow the URB life cycle,
169 * so we need to put them, not free them.
170 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			/* only outbound segments carry a DTO URB */
			if (!xfer->is_inbound)
				usb_put_urb(xfer->seg[cnt]->dto_urb);
			usb_put_urb(&xfer->seg[cnt]->urb);
		}
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				xfer->result = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
				urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/*
 * Return the ID associated with xfer
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/*
 * Search the WA's transfer list for a transfer with the given ID
 *
 * The transfer, if found, is returned referenced (wa_xfer_get()); the
 * caller has to drop that reference with wa_xfer_put() when done.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}
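
/*
 * Editor's illustrative sketch (not part of the original driver): the
 * lookup/put pairing a caller of wa_xfer_get_by_id() is expected to
 * follow, as wa_xfer_result_cb() does further down:
 *
 *	struct wa_xfer *xfer = wa_xfer_get_by_id(wa, xfer_id);
 *	if (xfer != NULL) {
 *		wa_xfer_result_chew(wa, xfer);
 *		wa_xfer_put(xfer);
 *	}
 */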

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;
}

/*
 * Compute the transfer's type, segment size and number of segments
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
	 * a check (FIXME) */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
		/ xfer->seg_size;
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	return result;
}
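
/*
 * Editor's worked example (hypothetical descriptor values, for
 * illustration only): with wBlocks == 4 and bRPipeBlockSize == 10,
 * the raw segment size is 4 * 2^(10-1) = 2048 bytes; with
 * wMaxPacketSize == 512 the rounding (2048 / 512) * 512 leaves it at
 * 2048. A 5000 byte bulk URB then needs (5000 + 2047) / 2048 == 3
 * segments, each sent as a WA_XFER_TYPE_BI request of
 * sizeof(struct wa_xfer_bi) bytes.
 */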

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
		/* fall through */
	default:
		BUG();
	}
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we only set it if the transfer is inbound;
 * for outbound transfers, wa_seg_dto_cb will do it when the OUT data
 * phase finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kzalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
				  usb_sndbulkpipe(usb_dev,
						  dto_epd->bEndpointAddress),
				  &seg->xfer_hdr, xfer_hdr_size,
				  wa_seg_cb, seg);
		buf_itr_size = buf_size > xfer->seg_size ?
			xfer->seg_size : buf_size;
		if (xfer->is_inbound == 0 && buf_size > 0) {
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
			if (xfer->is_dma) {
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
			} else
				seg->dto_urb->transfer_buffer =
					xfer->urb->transfer_buffer + buf_itr;
			seg->dto_urb->transfer_buffer_length = buf_itr_size;
		}
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;
	}
	return 0;

error_dto_alloc:
	kfree(xfer->seg[cnt]);
error_seg_kzalloc:
	/* use the fact that cnt is left at where it failed; seg[cnt]
	 * itself was never fully set up (it is freed or NULL by now),
	 * so tear down segments cnt-1 down to and including 0. The
	 * DTO URB has to be dropped with usb_put_urb(), not kfree() */
	while (--cnt >= 0) {
		if (xfer->is_inbound == 0)
			usb_put_urb(xfer->seg[cnt]->dto_urb);
		kfree(xfer->seg[cnt]);
	}
	kfree(xfer->seg);
	xfer->seg = NULL;	/* keep wa_xfer_destroy() off the stale array */
error_segs_kzalloc:
	return result;
}
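
/*
 * Editor's worked example (illustrative values, continuing the
 * example above): with seg_size == 2048 and an outbound, non-DMA URB
 * of 5000 bytes, __wa_xfer_setup_sizes() yields segs == 3 and the
 * loop above slices the buffer as
 *
 *	seg[0]->dto_urb->transfer_buffer = buf + 0,    length 2048
 *	seg[1]->dto_urb->transfer_buffer = buf + 2048, length 2048
 *	seg[2]->dto_urb->transfer_buffer = buf + 4096, length 904
 *
 * buf_itr walks the offsets; buf_itr_size clamps the last segment to
 * whatever is left of the buffer.
 */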

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 * need to do two for loops when we could run everything in a
 * single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
		cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}
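
/*
 * Editor's illustrative continuation of the example above: for the
 * 3-segment, 5000 byte transfer, the loop leaves the headers as
 *
 *	seg[0]: bTransferSegment 0x00, dwTransferLength 2048
 *	seg[1]: bTransferSegment 0x01, dwTransferLength 2048
 *	seg[2]: bTransferSegment 0x82, dwTransferLength 904
 *
 * (0x82 == 2 | 0x80, the 0x80 bit flagging the last segment), all
 * sharing the dwTransferID and wRPipe copied from header 0.
 */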

/*
 * Submit a segment's request URB and, for outbound data, its DTO URB
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}

/*
 * Execute more queued request segments, up to the maximum number of
 * concurrent segments the rpipe allows
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	       && !list_empty(&rpipe->seg_list)) {
		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}
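
/*
 * Editor's note on the locking (illustrative, derived from the code
 * above and from __wa_xfer_submit()): the established nesting order is
 *
 *	xfer->lock		(outer)
 *	    rpipe->seg_lock	(inner)
 *
 * which is why wa_xfer_delayed_run() fully drops seg_lock before
 * taking xfer->lock on the error path, instead of nesting them the
 * other way around and risking an AB-BA deadlock.
 */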

/*
 * Submit a transfer's segments, delaying the ones that don't fit
 *
 * xfer->lock is taken
 *
 * On failure submitting, we just stop and return the error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}

/*
 * Second part of a URB/transfer enqueueing
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *		delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	return;

	/*
	 * This is basically wa_xfer_completion() broken up:
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and so clean up and undo setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	return;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;

	spin_lock_irq(&wa->xfer_list_lock);
	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
				 list_node) {
		list_del_init(&xfer->list_node);
		spin_unlock_irq(&wa->xfer_list_lock);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */

		spin_lock_irq(&wa->xfer_list_lock);
	}
	spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueueing can sleep [see enqueue_b(), for the
 * rpipe_get() and the mutex_lock()]. If we are in an atomic section,
 * we defer the enqueue_b() call to a workqueue; otherwise we call it
 * directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if (urb->transfer_buffer == NULL
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
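
/*
 * Editor's illustrative caller sketch (hypothetical glue, not code
 * from this driver): an HCD's urb_enqueue op would funnel into the
 * function above roughly as
 *
 *	static int my_hc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 *				     gfp_t gfp)
 *	{
 *		struct wahc *wa = my_hcd_to_wahc(hcd);
 *		return wa_urb_enqueue(wa, urb->ep, urb, gfp);
 *	}
 *
 * where my_hcd_to_wahc() stands in for whatever container_of() glue
 * the real host controller driver uses.
 */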

/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue(), it
 * needs to be dequeued with completion calling: when it is stuck in
 * the delayed list, or wa_xfer_setup() has not been called yet, we
 * do the completion ourselves.
 *
 * not setup:	if there is no hcpriv yet, enqueue has had no time to
 *		set the xfer up. Because urb->status should be other
 *		than -EINPROGRESS, enqueue() will catch that and bail
 *		out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/*
		 * Nothing setup yet; enqueue will see urb->status !=
		 * -EINPROGRESS (set by the hcd layer) and bail out
		 * with an error, so there is no need to do completion
		 * here.
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static const int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] = 0,
		[WA_XFER_STATUS_HALTED] = -EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
		[WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
		[WA_XFER_RESERVED] = EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] = 0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
		[WA_XFER_STATUS_ABORTED] = -EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
		[WA_XFER_INVALID_FORMAT] = EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Unknown WA transfer status 0x%02x\n",
			__func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Inconsistent WA status: 0x%02x\n",
			__func__, real_status);
		errno = -errno;
	}
	return errno;
}
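
/*
 * Editor's illustrative examples (derived from the table above):
 *
 *	wa_xfer_status_to_errno(WA_XFER_STATUS_HALTED) == -EPIPE
 *	wa_xfer_status_to_errno(WA_XFER_STATUS_HALTED | 0x80) == -EPIPE
 *
 * (the second because the high two bits are stripped first). The
 * positive EINVAL entries, e.g. WA_XFER_RESERVED, are the "internal
 * inconsistency" cases: they get logged and negated before being
 * returned.
 */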

/*
 * Process an xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up into parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
			xfer, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
		} else {
			wa->buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	__wa_xfer_abort(xfer);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}

/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not an xfer_result, we complain and
 * repost the DTI-URB. If it is an xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
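
/*
 * Editor's sketch of the state machine described above (a reading
 * aid, not from the original author):
 *
 *	        notif              xfer_result for an IN seg
 *	OFF ------------> RXR <------------------------------> RBI
 *	 ^                 |     (the BUF-IN-URB completion
 *	 |                 |      reposts the DTI-URB and
 *	 |                 |      returns to RXR)
 *	 +-----------------+
 *	  ENOENT/ESHUTDOWN/too many errors
 */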
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have an xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_ABORTED
		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always keep a URB reading on the DTI
 * endpoint; we don't really set it up and start it until the first
 * xfer complete notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}