/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * How transfers work: get a buffer, break it up into segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, but it is a pain to implement.
 *
 *
 * ENTRY POINTS
 *
 *   FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   Warned you are; it's my second try and I'm still not happy with it.
 *
 * NOTES:
 *
 *   - No iso
 *
 *   - Supports DMA xfers, control, bulk and maybe interrupt
 *
 *   - Does not recycle unused rpipes
 *
 *     An rpipe is assigned to an endpoint the first time it is used,
 *     and then it's there, assigned, until the endpoint is disabled
 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *     (should be a mutex).
 *
 *     Two ways it could be done:
 *
 *     (a) set up a timer every time an rpipe's use count drops to 1
 *         (which means unused) or when a transfer ends. Reset the
 *         timer when an xfer is queued. If the timer expires, release
 *         the rpipe [see rpipe_ep_disable()].
 *
 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *         when none are found go over the list, check their endpoint
 *         and their activity record (if there was no last-xfer-done
 *         timestamp in the last x seconds) and take it
 *
 *     However, because we have a set of limited
 *     resources (max-segments-at-the-same-time per xfer,
 *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *     we are going to have to rebuild all this based on a scheduler,
 *     where we have a list of transactions to do and based on the
 *     availability of the different required components (blocks,
 *     rpipes, segment slots, etc), we schedule them. Painful.
 */
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
	/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
	WA_SEGS_MAX = 128,
};

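/*
 * Rough segment life cycle, reconstructed from the code below (an
 * informal sketch, not a normative state diagram):
 *
 *   NOTREADY -> READY -> SUBMITTED -> PENDING -> DTI_PENDING -> DONE
 *                    \-> DELAYED (no rpipe slot; queued on
 *                        rpipe->seg_list and submitted later by
 *                        __wa_xfer_delayed_run())
 *
 * Any state can fall into ERROR or ABORTED, which also count as done
 * for the purposes of __wa_xfer_is_done().
 */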
enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb tr_urb;		/* transfer request urb. */
	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */
	struct urb *dto_urb;		/* for data output. */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	int isoc_frame_count;	/* number of isoc frames in this segment. */
	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */
	/* Isoc frame that the current transfer buffer corresponds to. */
	int isoc_frame_index;
	int isoc_size;	/* size of all isoc frames sent by this seg. */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
};

static inline void wa_seg_init(struct wa_seg *seg)
{
	usb_init_urb(&seg->tr_urb);

	/* set the remaining memory to 0. */
	memset(((void *)seg) + sizeof(seg->tr_urb), 0,
		sizeof(*seg) - sizeof(seg->tr_urb));
}

/*
 * Transfer state. Protected by xfer->lock.
 *
 * Lock ordering note: xfer->lock nests outside rpipe->seg_lock
 * (see __wa_xfer_delayed_run()).
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};

static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
	struct wa_seg *seg, int curr_iso_frame);
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
	int starting_index, enum wa_seg_status status);

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			struct wa_seg *seg = xfer->seg[cnt];
			if (seg) {
				usb_free_urb(seg->isoc_pack_desc_urb);
				if (seg->dto_urb) {
					kfree(seg->dto_urb->sg);
					usb_free_urb(seg->dto_urb);
				}
				usb_free_urb(&seg->tr_urb);
			}
		}
		kfree(xfer->seg);
	}
	kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}

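/*
 * The wire adapter has a single DTO (data transfer out) endpoint shared
 * by all rpipes; access to it is serialized with the wa->dto_in_use bit.
 * A rough sketch of the usage pattern of the helpers below, as used by
 * __wa_seg_submit()/__wa_xfer_delayed_run() further down (illustrative
 * only):
 *
 *	if (__wa_dto_try_get(wa)) {
 *		... submit the DTO urb(s) ...
 *		__wa_dto_put(wa);
 *		wa_check_for_delayed_rpipes(wa);  (wake any waiters)
 *	} else {
 *		wa_add_delayed_rpipe(wa, rpipe);  (try again later)
 *	}
 */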
/*
 * Try to get exclusive access to the DTO endpoint resource. Return true
 * if successful.
 */
static inline int __wa_dto_try_get(struct wahc *wa)
{
	return (test_and_set_bit(0, &wa->dto_in_use) == 0);
}

/* Release the DTO endpoint resource. */
static inline void __wa_dto_put(struct wahc *wa)
{
	clear_bit_unlock(0, &wa->dto_in_use);
}

/* Service RPIPEs that are waiting on the DTO resource. */
static void wa_check_for_delayed_rpipes(struct wahc *wa)
{
	unsigned long flags;
	int dto_waiting = 0;
	struct wa_rpipe *rpipe;

	spin_lock_irqsave(&wa->rpipe_lock, flags);
	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
		rpipe = list_first_entry(&wa->rpipe_delayed_list,
				struct wa_rpipe, list_node);
		__wa_xfer_delayed_run(rpipe, &dto_waiting);
		/* remove this RPIPE from the list if it is not waiting. */
		if (!dto_waiting) {
			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
				__func__,
				le16_to_cpu(rpipe->descr.wRPipeIndex));
			list_del_init(&rpipe->list_node);
		}
	}
	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}

/* add this RPIPE to the end of the delayed RPIPE list. */
static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
{
	unsigned long flags;

	spin_lock_irqsave(&wa->rpipe_lock, flags);
	/* add rpipe to the list if it is not already on it. */
	if (list_empty(&rpipe->list_node)) {
		pr_debug("%s: adding RPIPE %d to the delayed list.\n",
			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
	}
	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... makes one wonder why they put a
 * 32-bit handle in there, then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/* Return the xfer's ID. */
static inline u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}

/* Return the xfer's ID in transport format (little endian). */
static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
{
	return cpu_to_le32(xfer->id);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
					xfer, wa_xfer_id(xfer), cnt,
					seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (!(usb_pipeisoc(xfer->urb->pipe))
				&& seg->result < xfer->seg_size
				&& cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, wa_xfer_id(xfer), seg->index, found_short,
				seg->result, urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
				xfer, wa_xfer_id(xfer), seg->index, seg->result,
				seg->result);
			goto out;
		case WA_SEG_ABORTED:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
				xfer, wa_xfer_id(xfer), seg->index, seg->result,
				seg->result);
			goto out;
		default:
			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
				 xfer, wa_xfer_id(xfer), cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}

/*
 * Mark the given segment as done.  Return true if this completes the xfer.
 * This should only be called for segs that have been submitted to an RPIPE.
 * Delayed segs are not marked as submitted so they do not need to be marked
 * as done when cleaning up.
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
	struct wa_seg *seg, enum wa_seg_status status)
{
	seg->status = status;
	xfer->segs_done++;

	/* check for done. */
	return __wa_xfer_is_done(xfer);
}

/*
 * Search for a transfer with the given ID on the wire adapter's xfer
 * list and take a reference on it if found.
 *
 * Transfer IDs are the sequential values assigned by wa_xfer_id_init(),
 * not pointers or pointer hashes.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}

struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wahc *wa;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	struct wahc *wa = b->wa;

	/*
	 * If the abort request URB failed, then the HWA did not get the abort
	 * command.  Forcibly clean up the xfer without waiting for a Transfer
	 * Result from the HWA.
	 */
	if (urb->status < 0) {
		struct wa_xfer *xfer;
		struct device *dev = &wa->usb_iface->dev;

		xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
		dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
			__func__, urb->status);
		if (xfer) {
			unsigned long flags;
			int done, seg_index = 0;
			struct wa_rpipe *rpipe = xfer->ep->hcpriv;

			dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
				__func__, xfer, wa_xfer_id(xfer));
			spin_lock_irqsave(&xfer->lock, flags);
			/* skip done segs. */
			while (seg_index < xfer->segs) {
				struct wa_seg *seg = xfer->seg[seg_index];

				if ((seg->status == WA_SEG_DONE) ||
					(seg->status == WA_SEG_ERROR)) {
					++seg_index;
				} else {
					break;
				}
			}
			/* mark remaining segs as aborted. */
			wa_complete_remaining_xfer_segs(xfer, seg_index,
				WA_SEG_ABORTED);
			done = __wa_xfer_is_done(xfer);
			spin_unlock_irqrestore(&xfer->lock, flags);
			if (done)
				wa_xfer_completion(xfer);
			wa_xfer_delayed_run(rpipe);
			wa_xfer_put(xfer);
		} else {
			dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
				 __func__, le32_to_cpu(b->cmd.dwTransferID));
		}
	}

	wa_put(wa);	/* taken in __wa_xfer_abort */
	usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB.  Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed when it is put.
 */
static int __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result = -ENOMEM;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
	b->wa = wa_get(xfer->wa);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return result;				/* callback frees! */


error_submit:
	wa_put(xfer->wa);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return result;

}

/*
 * Calculate the number of isoc frames starting from isoc_frame_offset
 * that will fit in a transfer segment.
 */
static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
	int isoc_frame_offset, int *total_size)
{
	int segment_size = 0, frame_count = 0;
	int index = isoc_frame_offset;
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;

	while ((index < xfer->urb->number_of_packets)
		&& ((segment_size + iso_frame_desc[index].length)
				<= xfer->seg_size)) {
		/*
		 * For Alereon HWA devices, only include an isoc frame in an
		 * out segment if it is physically contiguous with the previous
		 * frame.  This is required because those devices expect
		 * the isoc frames to be sent as a single USB transaction as
		 * opposed to one transaction per frame with standard HWA.
		 */
		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
			&& (xfer->is_inbound == 0)
			&& (index > isoc_frame_offset)
			&& ((iso_frame_desc[index - 1].offset +
				iso_frame_desc[index - 1].length) !=
				iso_frame_desc[index].offset))
			break;

		/* this frame fits. count it. */
		++frame_count;
		segment_size += iso_frame_desc[index].length;

		/* move to the next isoc frame. */
		++index;
	}

	*total_size = segment_size;
	return frame_count;
}

/*
 * Compute the segment size, number of segments and transfer type for
 * an xfer.
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
	enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		*pxfer_type = WA_XFER_TYPE_ISO;
		result = sizeof(struct wa_xfer_hwaiso);
		break;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;

	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1]); not really much of
	 * a check (FIXME) */
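	/*
	 * Worked example with illustrative descriptor values (not from
	 * any particular device): wBlocks = 16 and bRPipeBlockSize = 7
	 * give 2^(7-1) = 64-byte blocks, so seg_size = 16 << 6 = 1024
	 * bytes; with maxpktsize = 512 the rounding below leaves it at
	 * (1024 / 512) * 512 = 1024.
	 */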
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev,
			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
			xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
		int index = 0;

		xfer->segs = 0;
		/*
		 * loop over urb->number_of_packets to determine how many
		 * xfer segments will be needed to send the isoc frames.
		 */
		while (index < urb->number_of_packets) {
			int seg_size; /* don't care. */
			index += __wa_seg_calculate_isoc_frame_count(xfer,
					index, &seg_size);
			++xfer->segs;
		}
	} else {
		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
						xfer->seg_size);
		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
			xfer->segs = 1;
	}

	if (xfer->segs > WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
			(urb->transfer_buffer_length/xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
error:
	return result;
}

static void __wa_setup_isoc_packet_descr(
		struct wa_xfer_packet_info_hwaiso *packet_desc,
		struct wa_xfer *xfer,
		struct wa_seg *seg) {
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;
	int frame_index;

	/* populate isoc packet descriptor. */
	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
		(sizeof(packet_desc->PacketLength[0]) *
			seg->isoc_frame_count));
	for (frame_index = 0; frame_index < seg->isoc_frame_count;
		++frame_index) {
		int offset_index = frame_index + seg->isoc_frame_offset;
		packet_desc->PacketLength[frame_index] =
			cpu_to_le16(iso_frame_desc[offset_index].length);
	}
}


/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	struct wa_seg *seg = xfer->seg[0];

	xfer_hdr0 = &seg->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
			sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO: {
		struct wa_xfer_hwaiso *xfer_iso =
			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
		struct wa_xfer_packet_info_hwaiso *packet_desc =
			((void *)xfer_iso) + xfer_hdr_size;

		/* populate the isoc section of the transfer request. */
		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
		/* populate isoc packet descriptor. */
		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
		break;
	}
	default:
		BUG();
	}
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	int data_send_done = 1, release_dto = 0, holding_dto = 0;
	u8 done = 0;
	int result;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	spin_lock_irqsave(&xfer->lock, flags);
	wa = xfer->wa;
	dev = &wa->usb_iface->dev;
	if (usb_pipeisoc(xfer->urb->pipe)) {
		/* Alereon HWA sends all isoc frames in a single transfer. */
		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
			seg->isoc_frame_index += seg->isoc_frame_count;
		else
			seg->isoc_frame_index += 1;
		if (seg->isoc_frame_index < seg->isoc_frame_count) {
			data_send_done = 0;
			holding_dto = 1; /* checked in error cases. */
			/*
			 * if this is the last isoc frame of the segment, we
			 * can release DTO after sending this frame.
			 */
			if ((seg->isoc_frame_index + 1) >=
				seg->isoc_frame_count)
				release_dto = 1;
		}
		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
			wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
			holding_dto, release_dto);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		seg->result += urb->actual_length;
		if (data_send_done) {
			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
				wa_xfer_id(xfer), seg->index, seg->result);
			if (seg->status < WA_SEG_PENDING)
				seg->status = WA_SEG_PENDING;
		} else {
			/* should only hit this for isoc xfers. */
			/*
			 * Populate the dto URB with the next isoc frame buffer,
			 * send the URB and release DTO if we no longer need it.
			 */
			__wa_populate_dto_urb_isoc(xfer, seg,
				seg->isoc_frame_offset + seg->isoc_frame_index);

			/* resubmit the URB with the next isoc frame. */
			/* take a ref on resubmit. */
			wa_xfer_get(xfer);
			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
					wa_xfer_id(xfer), seg->index, result);
				spin_unlock_irqrestore(&xfer->lock, flags);
				goto error_dto_submit;
			}
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (release_dto) {
			__wa_dto_put(wa);
			wa_check_for_delayed_rpipes(wa);
		}
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		if (holding_dto) {
			__wa_dto_put(wa);
			wa_check_for_delayed_rpipes(wa);
		}
		break;
	default:		/* Other errors ... */
		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
			wa_xfer_id(xfer), seg->index, urb->status);
		goto error_default;
	}

	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
	return;

error_dto_submit:
	/* taken on resubmit attempt. */
	wa_xfer_put(xfer);
error_default:
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
		    EDC_ERROR_TIMEFRAME)){
		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (seg->status != WA_SEG_ERROR) {
		seg->result = urb->status;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (holding_dto) {
		__wa_dto_put(wa);
		wa_check_for_delayed_rpipes(wa);
	}
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
}

/*
 * Callback for the isoc packet descriptor phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_iso_pack_desc_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
			wa_xfer_id(xfer), seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
				wa_xfer_id(xfer), seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			usb_unlink_urb(seg->dto_urb);
			seg->result = urb->status;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
					WA_SEG_ERROR);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
}

/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access the transfer until we are sure that it hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state.  As well, we don't set it if the transfer is not inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_tr_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
			xfer, wa_xfer_id(xfer), seg->index);
		if (xfer->is_inbound &&
			seg->status < WA_SEG_PENDING &&
			!(usb_pipeisoc(xfer->urb->pipe)))
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->isoc_pack_desc_urb);
		usb_unlink_urb(seg->dto_urb);
		seg->result = urb->status;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	/* taken when this URB was submitted. */
	wa_xfer_put(xfer);
}

/*
 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset we are about to
 * transfer.
 */
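/*
 * For example (illustrative numbers only): an out transfer of 8 KiB
 * backed by an SG list and split into 4 KiB segments would call this
 * for the second segment with bytes_transferred = 4096 and
 * bytes_to_transfer = 4096; the returned SG list then references only
 * the pages backing bytes 4096..8191 of the original list.
 */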
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
	const unsigned int bytes_transferred,
	const unsigned int bytes_to_transfer, int *out_num_sgs)
{
	struct scatterlist *out_sg;
	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
		nents;
	struct scatterlist *current_xfer_sg = in_sg;
	struct scatterlist *current_seg_sg, *last_seg_sg;

	/* skip previously transferred pages. */
	while ((current_xfer_sg) &&
			(bytes_processed < bytes_transferred)) {
		bytes_processed += current_xfer_sg->length;

		/* advance the sg if current segment starts on or past the
			next page. */
		if (bytes_processed <= bytes_transferred)
			current_xfer_sg = sg_next(current_xfer_sg);
	}

	/* the data for the current segment starts in current_xfer_sg.
		calculate the offset. */
	if (bytes_processed > bytes_transferred) {
		offset_into_current_page_data = current_xfer_sg->length -
			(bytes_processed - bytes_transferred);
	}

	/* calculate the number of pages needed by this segment. */
	nents = DIV_ROUND_UP((bytes_to_transfer +
		offset_into_current_page_data +
		current_xfer_sg->offset),
		PAGE_SIZE);

	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
	if (out_sg) {
		sg_init_table(out_sg, nents);

		/* copy the portion of the incoming SG that correlates to the
		 * data to be transferred by this segment to the segment SG. */
		last_seg_sg = current_seg_sg = out_sg;
		bytes_processed = 0;

		/* reset nents and calculate the actual number of sg entries
			needed. */
		nents = 0;
		while ((bytes_processed < bytes_to_transfer) &&
				current_seg_sg && current_xfer_sg) {
			unsigned int page_len = min((current_xfer_sg->length -
				offset_into_current_page_data),
				(bytes_to_transfer - bytes_processed));

			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
				page_len,
				current_xfer_sg->offset +
				offset_into_current_page_data);

			bytes_processed += page_len;

			last_seg_sg = current_seg_sg;
			current_seg_sg = sg_next(current_seg_sg);
			current_xfer_sg = sg_next(current_xfer_sg);

			/* only the first page may require additional offset. */
			offset_into_current_page_data = 0;
			nents++;
		}

		/* update num_sgs and terminate the list since we may have
		 * concatenated pages. */
		sg_mark_end(last_seg_sg);
		*out_num_sgs = nents;
	}

	return out_sg;
}

/*
 * Populate DMA buffer info for the isoc dto urb.
 */
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
	struct wa_seg *seg, int curr_iso_frame)
{
	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	seg->dto_urb->sg = NULL;
	seg->dto_urb->num_sgs = 0;
	/* dto urb buffer address pulled from iso_frame_desc. */
	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
	/* The Alereon HWA sends a single URB with all isoc segs. */
	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
		seg->dto_urb->transfer_buffer_length = seg->isoc_size;
	else
		seg->dto_urb->transfer_buffer_length =
			xfer->urb->iso_frame_desc[curr_iso_frame].length;
}

/*
 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
 */
static int __wa_populate_dto_urb(struct wa_xfer *xfer,
	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
{
	int result = 0;

	if (xfer->is_dma) {
		seg->dto_urb->transfer_dma =
			xfer->urb->transfer_dma + buf_itr_offset;
		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		seg->dto_urb->sg = NULL;
		seg->dto_urb->num_sgs = 0;
	} else {
		/* do buffer or SG processing. */
		seg->dto_urb->transfer_flags &=
			~URB_NO_TRANSFER_DMA_MAP;
		/* this should always be 0 before a resubmit. */
		seg->dto_urb->num_mapped_sgs = 0;

		if (xfer->urb->transfer_buffer) {
			seg->dto_urb->transfer_buffer =
				xfer->urb->transfer_buffer +
				buf_itr_offset;
			seg->dto_urb->sg = NULL;
			seg->dto_urb->num_sgs = 0;
		} else {
			seg->dto_urb->transfer_buffer = NULL;

			/*
			 * allocate an SG list to store seg_size bytes
			 * and copy the subset of the xfer->urb->sg that
			 * matches the buffer subset we are about to
			 * read.
			 */
			seg->dto_urb->sg = wa_xfer_create_subset_sg(
				xfer->urb->sg,
				buf_itr_offset, buf_itr_size,
				&(seg->dto_urb->num_sgs));
			if (!(seg->dto_urb->sg))
				result = -ENOMEM;
		}
	}
	seg->dto_urb->transfer_buffer_length = buf_itr_size;

	return result;
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt, isoc_frame_offset = 0;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		size_t iso_pkt_descr_size = 0;
		int seg_isoc_frame_count = 0, seg_isoc_size = 0;

		/*
		 * Adjust the size of the segment object to contain space for
		 * the isoc packet descriptor buffer.
		 */
		if (usb_pipeisoc(xfer->urb->pipe)) {
			seg_isoc_frame_count =
				__wa_seg_calculate_isoc_frame_count(xfer,
					isoc_frame_offset, &seg_isoc_size);

			iso_pkt_descr_size =
				sizeof(struct wa_xfer_packet_info_hwaiso) +
				(seg_isoc_frame_count * sizeof(__le16));
		}
		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
						GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kmalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
			usb_sndbulkpipe(usb_dev,
					dto_epd->bEndpointAddress),
			&seg->xfer_hdr, xfer_hdr_size,
			wa_seg_tr_cb, seg);
		buf_itr_size = min(buf_size, xfer->seg_size);

		if (usb_pipeisoc(xfer->urb->pipe)) {
			seg->isoc_frame_count = seg_isoc_frame_count;
			seg->isoc_frame_offset = isoc_frame_offset;
			seg->isoc_size = seg_isoc_size;
			/* iso packet descriptor. */
			seg->isoc_pack_desc_urb =
					usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->isoc_pack_desc_urb == NULL)
				goto error_iso_pack_desc_alloc;
			/*
			 * The buffer for the isoc packet descriptor starts
			 * after the transfer request header in the
			 * segment object memory buffer.
			 */
			usb_fill_bulk_urb(
				seg->isoc_pack_desc_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
					dto_epd->bEndpointAddress),
				(void *)(&seg->xfer_hdr) +
					xfer_hdr_size,
				iso_pkt_descr_size,
				wa_seg_iso_pack_desc_cb, seg);

			/* adjust starting frame offset for next seg. */
			isoc_frame_offset += seg_isoc_frame_count;
		}

		if (xfer->is_inbound == 0 && buf_size > 0) {
			/* outbound data. */
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);

			if (usb_pipeisoc(xfer->urb->pipe)) {
				/*
				 * Fill in the xfer buffer information for the
				 * first isoc frame.  Subsequent frames in this
				 * segment will be filled in and sent from the
				 * DTO completion routine, if needed.
				 */
				__wa_populate_dto_urb_isoc(xfer, seg,
					seg->isoc_frame_offset);
			} else {
				/* fill in the xfer buffer information. */
				result = __wa_populate_dto_urb(xfer, seg,
							buf_itr, buf_itr_size);
				if (result < 0)
					goto error_seg_outbound_populate;

				buf_itr += buf_itr_size;
				buf_size -= buf_itr_size;
			}
		}
		seg->status = WA_SEG_READY;
	}
	return 0;

	/*
	 * Free the memory for the current segment which failed to init.
	 * Use the fact that cnt is left where it failed.  The remaining
	 * segments will be cleaned up by wa_xfer_destroy.
	 */
error_seg_outbound_populate:
	usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
error_iso_pack_desc_alloc:
	kfree(xfer->seg[cnt]);
	xfer->seg[cnt] = NULL;
error_seg_kmalloc:
error_segs_kzalloc:
	return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	if (xfer_type == WA_XFER_TYPE_ISO) {
		xfer_hdr0->dwTransferLength =
			cpu_to_le32(xfer->seg[0]->isoc_size);
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			struct wa_xfer_packet_info_hwaiso *packet_desc;
			struct wa_seg *seg = xfer->seg[cnt];
			struct wa_xfer_hwaiso *xfer_iso;

			xfer_hdr = &seg->xfer_hdr;
			xfer_iso = container_of(xfer_hdr,
						struct wa_xfer_hwaiso, hdr);
			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
			/*
			 * Copy values from the 0th header. Segment specific
			 * values are set below.
			 */
			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				cpu_to_le32(seg->isoc_size);
			xfer_iso->dwNumOfPackets =
					cpu_to_le32(seg->isoc_frame_count);
			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
			seg->status = WA_SEG_READY;
		}
	} else {
		transfer_size = urb->transfer_buffer_length;
		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size) :
			cpu_to_le32(transfer_size);
		transfer_size -= xfer->seg_size;
		for (cnt = 1; cnt < xfer->segs; cnt++) {
			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
			xfer_hdr->bTransferSegment = cnt;
			xfer_hdr->dwTransferLength =
				transfer_size > xfer->seg_size ?
					cpu_to_le32(xfer->seg_size)
					: cpu_to_le32(transfer_size);
			xfer->seg[cnt]->status = WA_SEG_READY;
			transfer_size -= xfer->seg_size;
		}
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}

/*
 * Submit a transfer segment: the transfer request URB plus, if present,
 * the isoc packet descriptor URB and the DTO data URB.
 *
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
	struct wa_seg *seg, int *dto_done)
{
	int result;

	/* default to done unless we encounter a multi-frame isoc segment. */
	*dto_done = 1;

	/*
	 * Take a ref for each segment urb so the xfer cannot disappear until
	 * all of the callbacks run.
	 */
	wa_xfer_get(xfer);
	/* submit the transfer request. */
	seg->status = WA_SEG_SUBMITTED;
	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
	if (result < 0) {
		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
			__func__, xfer, seg->index, result);
		wa_xfer_put(xfer);
		goto error_tr_submit;
	}
	/* submit the isoc packet descriptor if present. */
	if (seg->isoc_pack_desc_urb) {
		wa_xfer_get(xfer);
		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
		seg->isoc_frame_index = 0;
		if (result < 0) {
			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
				__func__, xfer, seg->index, result);
			wa_xfer_put(xfer);
			goto error_iso_pack_desc_submit;
		}
	}
	/* submit the out data if this is an out request. */
	if (seg->dto_urb) {
		struct wahc *wa = xfer->wa;
		wa_xfer_get(xfer);
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
				__func__, xfer, seg->index, result);
			wa_xfer_put(xfer);
			goto error_dto_submit;
		}
		/*
		 * If this segment contains more than one isoc frame, hold
		 * onto the dto resource until we send all frames.
		 * Only applies to non-Alereon devices.
		 */
		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
			&& (seg->isoc_frame_count > 1))
			*dto_done = 0;
	}
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
	usb_unlink_urb(&seg->tr_urb);
error_tr_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	*dto_done = 1;
	return result;
}

/*
 * Execute more queued request segments until the maximum concurrent allowed.
 * Return true if the DTO resource was acquired and released.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
{
	int result, dto_acquired = 0, dto_done = 0;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	*dto_waiting = 0;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
			&& !list_empty(&rpipe->seg_list)
			&& (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
					list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		/*
		 * Get a reference to the xfer in case the callbacks for the
		 * URBs submitted by __wa_seg_submit attempt to complete
		 * the xfer before this function completes.
		 */
		wa_xfer_get(xfer);
		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
		/* release the dto resource if this RPIPE is done with it. */
		if (dto_done)
			__wa_dto_put(rpipe->wa);
		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
			xfer, wa_xfer_id(xfer), seg->index,
			atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			int done;

			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			/*
			 * This seg was marked as submitted when it was put on
			 * the RPIPE seg_list.  Mark it done.
			 */
			xfer->segs_done++;
			done = __wa_xfer_is_done(xfer);
			spin_unlock_irqrestore(&xfer->lock, flags);
			if (done)
				wa_xfer_completion(xfer);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
		wa_xfer_put(xfer);
	}
	/*
	 * Mark this RPIPE as waiting if dto was not acquired, there are
	 * delayed segs and no active transfers to wake us up later.
	 */
	if (!dto_acquired && !list_empty(&rpipe->seg_list)
		&& (atomic_read(&rpipe->segs_available) ==
			le16_to_cpu(rpipe->descr.wRequests)))
		*dto_waiting = 1;

	spin_unlock_irqrestore(&rpipe->seg_lock, flags);

	return dto_done;
}

static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int dto_waiting;
	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);

	/*
	 * If this RPIPE is waiting on the DTO resource, add it to the tail of
	 * the waiting list.
	 * Otherwise, if the WA DTO resource was acquired and released by
	 * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
	 * DTO and failed during that time.  Check the delayed list and process
	 * any waiters.  Start searching from the next RPIPE index.
	 */
	if (dto_waiting)
		wa_add_delayed_rpipe(rpipe->wa, rpipe);
	else if (dto_done)
		wa_check_for_delayed_rpipes(rpipe->wa);
}

/*
 * Submit the segments of a transfer to its rpipe.
 *
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		int delay_seg = 1;

		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		if (available && empty) {
			/*
			 * Only attempt to acquire DTO if we have a segment
			 * to send.
			 */
			dto_acquired = __wa_dto_try_get(rpipe->wa);
			if (dto_acquired) {
				delay_seg = 0;
				result = __wa_seg_submit(rpipe, xfer, seg,
							&dto_done);
				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
					xfer, wa_xfer_id(xfer), cnt, available,
					empty);
				if (dto_done)
					__wa_dto_put(rpipe->wa);

				if (result < 0) {
					__wa_xfer_abort(xfer);
					goto error_seg_submit;
				}
			}
		}

		if (delay_seg) {
			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
				xfer, wa_xfer_id(xfer), cnt, available, empty);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	/*
	 * Mark this RPIPE as waiting if dto was not acquired, there are
	 * delayed segs and no active transfers to wake us up later.
	 */
	if (!dto_acquired && !list_empty(&rpipe->seg_list)
		&& (atomic_read(&rpipe->segs_available) ==
			le16_to_cpu(rpipe->descr.wRequests)))
		dto_waiting = 1;
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);

	if (dto_waiting)
		wa_add_delayed_rpipe(rpipe->wa, rpipe);
	else if (dto_done)
		wa_check_for_delayed_rpipes(rpipe->wa);

	return result;
}

/*
 * Second part of a URB/transfer enqueue operation
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()].  At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *		delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure.  However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in.  If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static int wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0) {
		pr_err("%s: error_rpipe_get\n", __func__);
		goto error_rpipe_get;
	}
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		pr_err("%s: error usb dev gone\n", __func__);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
			__func__);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS) {
		dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
		goto error_dequeued;
	}

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0) {
		dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
		goto error_xfer_setup;
	}
	/*
	 * Get a xfer reference since __wa_xfer_submit starts asynchronous
	 * operations that may try to complete the xfer before this function
	 * exits.
	 */
	wa_xfer_get(xfer);
	result = __wa_xfer_submit(xfer);
	if (result < 0) {
		dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
		goto error_xfer_submit;
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
	return 0;

	/*
	 * this is basically wa_xfer_completion() broken up wa_xfer_giveback()
	 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
	 * setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	return result;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	wa_xfer_put(xfer);
	/* return success since the completion routine will run. */
	return 0;
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle.  That's why we do the whole thing under the
 * wa->xfer_list_lock.  If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and then
 * submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;
	LIST_HEAD(tmp_list);

	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
			wa->xfer_delayed_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
	 * can take xfer->lock as well as lock mutexes.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		list_del_init(&xfer->list_node);

		urb = xfer->urb;
		if (wa_urb_enqueue_b(xfer) < 0)
			wa_xfer_giveback(xfer);
		usb_put_urb(urb);	/* taken when queuing */
	}
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from temp list without list lock
	 * held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);

1821/*
1822 * Submit a transfer to the Wire Adapter in a delayed way
1823 *
1824 * The process of enqueuing involves possible sleeps [see
1825 * wa_urb_enqueue_b(), for the rpipe_get_by_ep() and mutex_lock()]. If
1826 * we are in an atomic section, we defer the wa_urb_enqueue_b() call;
1827 * otherwise we call it directly.
1828 *
1829 * @urb: We own a reference to it, taken by the Linux USB HCD stack,
1830 * that will be given up by calling usb_hcd_giveback_urb() or by
1831 * returning an error from this function -- ergo we don't have to refcount it.
1832 */
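/*
 * For orientation, a sketch of how a host-glue caller typically hands
 * URBs to this function (modeled from memory on the HWA host code in
 * hwa-hc.c; the exact names there may differ):
 *
 *	static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd,
 *					struct urb *urb, gfp_t gfp)
 *	{
 *		struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
 *		struct hwahc *hwahc = container_of(wusbhc, struct hwahc,
 *						   wusbhc);
 *
 *		return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
 *	}
 */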
1833int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1834 struct urb *urb, gfp_t gfp)
1835{
1836 int result;
1837 struct device *dev = &wa->usb_iface->dev;
1838 struct wa_xfer *xfer;
1839 unsigned long my_flags;
1840	unsigned cant_sleep = irqs_disabled() || in_atomic();
1841
1842 if ((urb->transfer_buffer == NULL)
1843 && (urb->sg == NULL)
1844 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1845 && urb->transfer_buffer_length != 0) {
1846 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1847 dump_stack();
1848 }
1849
1850 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1851 result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1852 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1853 if (result < 0)
1854 goto error_link_urb;
1855
1856 result = -ENOMEM;
1857 xfer = kzalloc(sizeof(*xfer), gfp);
1858 if (xfer == NULL)
1859 goto error_kmalloc;
1860
1861 result = -ENOENT;
1862 if (urb->status != -EINPROGRESS) /* cancelled */
1863 goto error_dequeued; /* before starting? */
1864 wa_xfer_init(xfer);
1865 xfer->wa = wa_get(wa);
1866 xfer->urb = urb;
1867 xfer->gfp = gfp;
1868 xfer->ep = ep;
1869 urb->hcpriv = xfer;
1870
1871 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1872 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1873 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1874 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1875 cant_sleep ? "deferred" : "inline");
1876
1877 if (cant_sleep) {
1878 usb_get_urb(urb);
1879 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1880 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1881 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1882 queue_work(wusbd, &wa->xfer_enqueue_work);
1883 } else {
1884 result = wa_urb_enqueue_b(xfer);
1885 if (result < 0) {
1886 /*
1887 * URB submit/enqueue failed. Clean up, return an
1888 * error and do not run the callback. This avoids
1889 * an infinite submit/complete loop.
1890 */
1891 dev_err(dev, "%s: URB enqueue failed: %d\n",
1892 __func__, result);
1893 wa_put(xfer->wa);
1894 wa_xfer_put(xfer);
1895 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1896 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1897 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1898 return result;
1899 }
1900 }
1901 return 0;
1902
1903error_dequeued:
1904 kfree(xfer);
1905error_kmalloc:
1906 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1907 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1908 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1909error_link_urb:
1910 return result;
1911}
1912EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1913
1914/*
1915 * Dequeue a URB and make sure usb_hcd_giveback_urb() [the completion
1916 * handler] is called.
1917 *
1918 * Until a transfer has gone successfully through wa_urb_enqueue(), it
1919 * needs to be dequeued with the completion called; when it is stuck in
1920 * the delayed list, or before wa_xfer_setup() is called, we need to do
1921 * the completion ourselves.
1922 *
1923 * not setup: if there is no hcpriv yet, enqueue has had no time to set
1924 * the xfer up. Because urb->status will then be other than
1925 * -EINPROGRESS, enqueue() will catch that and bail out.
1926 *
1927 * If the transfer has gone through setup, we just need to clean it
1928 * up. If it has gone through submit(), we have to abort it [with an
1929 * asynch request] and then make sure we cancel each segment.
1930 *
1931 */
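/*
 * Informal map of the cases above to the code below: a transfer still
 * sitting on the delayed list takes the dequeue_delayed exit; one with
 * no xfer->seg yet is left for enqueue_b() to bail out on; anything
 * already submitted gets an asynchronous abort plus per-segment
 * cleanup in the switch below.
 */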
1932int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1933{
1934 unsigned long flags, flags2;
1935 struct wa_xfer *xfer;
1936 struct wa_seg *seg;
1937 struct wa_rpipe *rpipe;
1938 unsigned cnt, done = 0, xfer_abort_pending;
1939 unsigned rpipe_ready = 0;
1940 int result;
1941
1942 /* check if it is safe to unlink. */
1943 spin_lock_irqsave(&wa->xfer_list_lock, flags);
1944 result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1945 if ((result == 0) && urb->hcpriv) {
1946 /*
1947 * Get a xfer ref to prevent a race with wa_xfer_giveback
1948 * cleaning up the xfer while we are working with it.
1949 */
1950 wa_xfer_get(urb->hcpriv);
1951 }
1952 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1953 if (result)
1954 return result;
1955
1956 xfer = urb->hcpriv;
1957 if (xfer == NULL)
1958 return -ENOENT;
1959 spin_lock_irqsave(&xfer->lock, flags);
1960 pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1961 rpipe = xfer->ep->hcpriv;
1962 if (rpipe == NULL) {
1963 pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s",
1964 __func__, xfer, wa_xfer_id(xfer),
1965		"Probably already aborted.\n");
1966 result = -ENOENT;
1967 goto out_unlock;
1968 }
1969 /*
1970 * Check for done to avoid racing with wa_xfer_giveback and completing
1971 * twice.
1972 */
1973 if (__wa_xfer_is_done(xfer)) {
1974 pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1975 xfer, wa_xfer_id(xfer));
1976 result = -ENOENT;
1977 goto out_unlock;
1978 }
1979 /* Check the delayed list -> if there, release and complete */
1980 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1981 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1982 goto dequeue_delayed;
1983 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1984 if (xfer->seg == NULL) /* still hasn't reached */
1985 goto out_unlock; /* setup(), enqueue_b() completes */
1986 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1987 xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1988 /*
1989 * grab the rpipe->seg_lock here to prevent racing with
1990 * __wa_xfer_delayed_run.
1991 */
1992 spin_lock(&rpipe->seg_lock);
1993 for (cnt = 0; cnt < xfer->segs; cnt++) {
1994 seg = xfer->seg[cnt];
1995 pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1996 __func__, wa_xfer_id(xfer), cnt, seg->status);
1997 switch (seg->status) {
1998 case WA_SEG_NOTREADY:
1999 case WA_SEG_READY:
2000 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
2001 xfer, cnt, seg->status);
2002 WARN_ON(1);
2003 break;
2004 case WA_SEG_DELAYED:
2005 /*
2006 * delete from rpipe delayed list. If no segments on
2007 * this xfer have been submitted, __wa_xfer_is_done will
2008 * trigger a giveback below. Otherwise, the submitted
2009 * segments will be completed in the DTI interrupt.
2010 */
2011 seg->status = WA_SEG_ABORTED;
2012 seg->result = -ENOENT;
2013 list_del(&seg->list_node);
2014 xfer->segs_done++;
2015 break;
2016 case WA_SEG_DONE:
2017 case WA_SEG_ERROR:
2018 case WA_SEG_ABORTED:
2019 break;
2020 /*
2021 * The buf_in data for a segment in the
2022 * WA_SEG_DTI_PENDING state is actively being read.
2023 * Let wa_buf_in_cb handle it since it will be called
2024 * and will increment xfer->segs_done. Cleaning up
2025 * here could cause wa_buf_in_cb to access the xfer
2026 * after it has been completed/freed.
2027 */
2028 case WA_SEG_DTI_PENDING:
2029 break;
2030 /*
2031 * In the states below, the HWA device already knows
2032 * about the transfer. If an abort request was sent,
2033 * allow the HWA to process it and wait for the
2034 * results. Otherwise, the DTI state and seg completed
2035 * counts can get out of sync.
2036 */
2037 case WA_SEG_SUBMITTED:
2038 case WA_SEG_PENDING:
2039 /*
2040 * Check if the abort was successfully sent. This could
2041 * be false if the HWA has been removed but we haven't
2042 * gotten the disconnect notification yet.
2043 */
2044 if (!xfer_abort_pending) {
2045 seg->status = WA_SEG_ABORTED;
2046 rpipe_ready = rpipe_avail_inc(rpipe);
2047 xfer->segs_done++;
2048 }
2049 break;
2050 }
2051 }
2052 spin_unlock(&rpipe->seg_lock);
2053 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
2054 done = __wa_xfer_is_done(xfer);
2055 spin_unlock_irqrestore(&xfer->lock, flags);
2056 if (done)
2057 wa_xfer_completion(xfer);
2058 if (rpipe_ready)
2059 wa_xfer_delayed_run(rpipe);
2060 wa_xfer_put(xfer);
2061 return result;
2062
2063out_unlock:
2064 spin_unlock_irqrestore(&xfer->lock, flags);
2065 wa_xfer_put(xfer);
2066 return result;
2067
2068dequeue_delayed:
2069 list_del_init(&xfer->list_node);
2070 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
2071 xfer->result = urb->status;
2072 spin_unlock_irqrestore(&xfer->lock, flags);
2073 wa_xfer_giveback(xfer);
2074 wa_xfer_put(xfer);
2075 usb_put_urb(urb); /* we got a ref in enqueue() */
2076 return 0;
2077}
2078EXPORT_SYMBOL_GPL(wa_urb_dequeue);
2079
2080/*
2081 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
2082 * codes
2083 *
2084 * Positive errno values are internal inconsistencies and should be
2085 * flagged louder. Negative are to be passed up to the user in the
2086 * normal way.
2087 *
2088 * @status: USB WA status code -- high two bits are stripped.
2089 */
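/*
 * Example, from the table below: WA_XFER_STATUS_HALTED maps to -EPIPE
 * and is passed up normally; the reserved/invalid codes map to a
 * positive EINVAL, which is logged as an internal inconsistency and
 * negated before returning.
 */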
2090static int wa_xfer_status_to_errno(u8 status)
2091{
2092 int errno;
2093 u8 real_status = status;
2094 static int xlat[] = {
2095 [WA_XFER_STATUS_SUCCESS] = 0,
2096 [WA_XFER_STATUS_HALTED] = -EPIPE,
2097 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
2098 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
2099 [WA_XFER_RESERVED] = EINVAL,
2100 [WA_XFER_STATUS_NOT_FOUND] = 0,
2101 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2102 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
2103 [WA_XFER_STATUS_ABORTED] = -ENOENT,
2104 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
2105 [WA_XFER_INVALID_FORMAT] = EINVAL,
2106 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
2107 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
2108 };
2109 status &= 0x3f;
2110
2111 if (status == 0)
2112 return 0;
2113 if (status >= ARRAY_SIZE(xlat)) {
2114 printk_ratelimited(KERN_ERR "%s(): BUG? "
2115 "Unknown WA transfer status 0x%02x\n",
2116 __func__, real_status);
2117 return -EINVAL;
2118 }
2119 errno = xlat[status];
2120 if (unlikely(errno > 0)) {
2121 printk_ratelimited(KERN_ERR "%s(): BUG? "
2122 "Inconsistent WA status: 0x%02x\n",
2123 __func__, real_status);
2124 errno = -errno;
2125 }
2126 return errno;
2127}
2128
2129/*
2130 * If a last segment flag and/or a transfer result error is encountered,
2131 * no other segment transfer results will be returned from the device.
2132 * Mark the remaining submitted or pending segments as completed so that
2133 * the xfer will complete cleanly.
2134 *
2135 * xfer->lock must be held
2136 *
2137 */
2138static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2139 int starting_index, enum wa_seg_status status)
2140{
2141 int index;
2142 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2143
2144 for (index = starting_index; index < xfer->segs_submitted; index++) {
2145 struct wa_seg *current_seg = xfer->seg[index];
2146
2147 BUG_ON(current_seg == NULL);
2148
2149 switch (current_seg->status) {
2150 case WA_SEG_SUBMITTED:
2151 case WA_SEG_PENDING:
2152 case WA_SEG_DTI_PENDING:
2153 rpipe_avail_inc(rpipe);
2154 /*
2155 * do not increment RPIPE avail for the WA_SEG_DELAYED case
2156 * since it has not been submitted to the RPIPE.
2157 */
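			/* fall through */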
2158 case WA_SEG_DELAYED:
2159 xfer->segs_done++;
2160 current_seg->status = status;
2161 break;
2162 case WA_SEG_ABORTED:
2163 break;
2164 default:
2165 WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2166 __func__, wa_xfer_id(xfer), index,
2167 current_seg->status);
2168 break;
2169 }
2170 }
2171}
2172
2173/* Populate the given urb based on the current isoc transfer state. */
2174static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2175 struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2176{
2177 int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2178 int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2179 struct usb_iso_packet_descriptor *iso_frame_desc =
2180 xfer->urb->iso_frame_desc;
2181 const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2182 int next_frame_contiguous;
2183 struct usb_iso_packet_descriptor *iso_frame;
2184
2185 BUG_ON(buf_in_urb->status == -EINPROGRESS);
2186
2187 /*
2188 * If the current frame actual_length is contiguous with the next frame
2189 * and actual_length is a multiple of the DTI endpoint max packet size,
2190 * combine the current frame with the next frame in a single URB. This
2191 * reduces the number of URBs that must be submitted in that case.
2192 */
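	/*
	 * Worked example (hypothetical numbers): with a DTI maxpacket of
	 * 512 and frames (offset, actual_length) of (0, 512), (512, 512)
	 * and (1024, 100), each frame is contiguous with the next and the
	 * first two lengths are multiples of 512, so the loop below folds
	 * all three into a single 1124-byte buf_in URB.
	 */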
2193 seg_index = seg->isoc_frame_index;
2194 do {
2195 next_frame_contiguous = 0;
2196
2197 iso_frame = &iso_frame_desc[urb_frame_index];
2198 total_len += iso_frame->actual_length;
2199 ++urb_frame_index;
2200 ++seg_index;
2201
2202 if (seg_index < seg->isoc_frame_count) {
2203 struct usb_iso_packet_descriptor *next_iso_frame;
2204
2205 next_iso_frame = &iso_frame_desc[urb_frame_index];
2206
2207 if ((iso_frame->offset + iso_frame->actual_length) ==
2208 next_iso_frame->offset)
2209 next_frame_contiguous = 1;
2210 }
2211 } while (next_frame_contiguous
2212 && ((iso_frame->actual_length % dti_packet_size) == 0));
2213
2214 /* this should always be 0 before a resubmit. */
2215 buf_in_urb->num_mapped_sgs = 0;
2216 buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2217 iso_frame_desc[urb_start_frame].offset;
2218 buf_in_urb->transfer_buffer_length = total_len;
2219 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2220 buf_in_urb->transfer_buffer = NULL;
2221 buf_in_urb->sg = NULL;
2222 buf_in_urb->num_sgs = 0;
2223 buf_in_urb->context = seg;
2224
2225 /* return the number of frames included in this URB. */
2226 return seg_index - seg->isoc_frame_index;
2227}
2228
2229/* Populate the given urb based on the current transfer state. */
2230static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2231 unsigned int seg_idx, unsigned int bytes_transferred)
2232{
2233 int result = 0;
2234 struct wa_seg *seg = xfer->seg[seg_idx];
2235
2236 BUG_ON(buf_in_urb->status == -EINPROGRESS);
2237 /* this should always be 0 before a resubmit. */
2238 buf_in_urb->num_mapped_sgs = 0;
2239
2240 if (xfer->is_dma) {
2241 buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2242 + (seg_idx * xfer->seg_size);
2243 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2244 buf_in_urb->transfer_buffer = NULL;
2245 buf_in_urb->sg = NULL;
2246 buf_in_urb->num_sgs = 0;
2247 } else {
2248 /* do buffer or SG processing. */
2249 buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2250
2251 if (xfer->urb->transfer_buffer) {
2252 buf_in_urb->transfer_buffer =
2253 xfer->urb->transfer_buffer
2254 + (seg_idx * xfer->seg_size);
2255 buf_in_urb->sg = NULL;
2256 buf_in_urb->num_sgs = 0;
2257 } else {
2258 /* allocate an SG list to store seg_size bytes
2259 and copy the subset of the xfer->urb->sg
2260 that matches the buffer subset we are
2261 about to read. */
2262 buf_in_urb->sg = wa_xfer_create_subset_sg(
2263 xfer->urb->sg,
2264 seg_idx * xfer->seg_size,
2265 bytes_transferred,
2266 &(buf_in_urb->num_sgs));
2267
2268 if (!(buf_in_urb->sg)) {
2269 buf_in_urb->num_sgs = 0;
2270 result = -ENOMEM;
2271 }
2272 buf_in_urb->transfer_buffer = NULL;
2273 }
2274 }
2275 buf_in_urb->transfer_buffer_length = bytes_transferred;
2276 buf_in_urb->context = seg;
2277
2278 return result;
2279}
2280
2281/*
2282 * Process a xfer result completion message
2283 *
2284 * inbound transfers: need to schedule a buf_in_urb read
2285 *
2286 * FIXME: this function needs to be broken up in parts
2287 */
2288static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2289 struct wa_xfer_result *xfer_result)
2290{
2291 int result;
2292 struct device *dev = &wa->usb_iface->dev;
2293 unsigned long flags;
2294 unsigned int seg_idx;
2295 struct wa_seg *seg;
2296 struct wa_rpipe *rpipe;
2297 unsigned done = 0;
2298 u8 usb_status;
2299 unsigned rpipe_ready = 0;
2300 unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2301 struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2302
2303 spin_lock_irqsave(&xfer->lock, flags);
2304 seg_idx = xfer_result->bTransferSegment & 0x7f;
2305 if (unlikely(seg_idx >= xfer->segs))
2306 goto error_bad_seg;
2307 seg = xfer->seg[seg_idx];
2308 rpipe = xfer->ep->hcpriv;
2309 usb_status = xfer_result->bTransferStatus;
2310 dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2311 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2312 if (seg->status == WA_SEG_ABORTED
2313 || seg->status == WA_SEG_ERROR) /* already handled */
2314 goto segment_aborted;
2315	if (seg->status == WA_SEG_SUBMITTED) /* oops, got here */
2316 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
2317 if (seg->status != WA_SEG_PENDING) {
2318 if (printk_ratelimit())
2319 dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2320 xfer, seg_idx, seg->status);
2321 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
2322 }
2323 if (usb_status & 0x80) {
2324 seg->result = wa_xfer_status_to_errno(usb_status);
2325 dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2326 xfer, xfer->id, seg->index, usb_status);
2327 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2328 WA_SEG_ABORTED : WA_SEG_ERROR;
2329 goto error_complete;
2330 }
2331 /* FIXME: we ignore warnings, tally them for stats */
2332 if (usb_status & 0x40) /* Warning?... */
2333 usb_status = 0; /* ... pass */
2334 /*
2335 * If the last segment bit is set, complete the remaining segments.
2336 * When the current segment is completed, either in wa_buf_in_cb for
2337 * transfers with data or below for no data, the xfer will complete.
2338 */
2339 if (xfer_result->bTransferSegment & 0x80)
2340 wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2341 WA_SEG_DONE);
2342 if (usb_pipeisoc(xfer->urb->pipe)
2343 && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2344 /* set up WA state to read the isoc packet status next. */
2345 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2346 wa->dti_isoc_xfer_seg = seg_idx;
2347 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2348 } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2349 && (bytes_transferred > 0)) {
2350 /* IN data phase: read to buffer */
2351 seg->status = WA_SEG_DTI_PENDING;
2352 result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2353 bytes_transferred);
2354 if (result < 0)
2355 goto error_buf_in_populate;
2356 ++(wa->active_buf_in_urbs);
2357 result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2358 if (result < 0) {
2359 --(wa->active_buf_in_urbs);
2360 goto error_submit_buf_in;
2361 }
2362 } else {
2363 /* OUT data phase or no data, complete it -- */
2364 seg->result = bytes_transferred;
2365 rpipe_ready = rpipe_avail_inc(rpipe);
2366 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2367 }
2368 spin_unlock_irqrestore(&xfer->lock, flags);
2369 if (done)
2370 wa_xfer_completion(xfer);
2371 if (rpipe_ready)
2372 wa_xfer_delayed_run(rpipe);
2373 return;
2374
2375error_submit_buf_in:
2376 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2377 dev_err(dev, "DTI: URB max acceptable errors "
2378 "exceeded, resetting device\n");
2379 wa_reset_all(wa);
2380 }
2381 if (printk_ratelimit())
2382 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2383 xfer, seg_idx, result);
2384 seg->result = result;
2385 kfree(buf_in_urb->sg);
2386 buf_in_urb->sg = NULL;
2387error_buf_in_populate:
2388 __wa_xfer_abort(xfer);
2389 seg->status = WA_SEG_ERROR;
2390error_complete:
2391 xfer->segs_done++;
2392 rpipe_ready = rpipe_avail_inc(rpipe);
2393 wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2394 done = __wa_xfer_is_done(xfer);
2395 /*
2396 * queue work item to clear STALL for control endpoints.
2397 * Otherwise, let endpoint_reset take care of it.
2398 */
2399 if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2400 usb_endpoint_xfer_control(&xfer->ep->desc) &&
2401 done) {
2402
2403 dev_info(dev, "Control EP stall. Queue delayed work.\n");
2404 spin_lock(&wa->xfer_list_lock);
2405 /* move xfer from xfer_list to xfer_errored_list. */
2406 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2407 spin_unlock(&wa->xfer_list_lock);
2408 spin_unlock_irqrestore(&xfer->lock, flags);
2409 queue_work(wusbd, &wa->xfer_error_work);
2410 } else {
2411 spin_unlock_irqrestore(&xfer->lock, flags);
2412 if (done)
2413 wa_xfer_completion(xfer);
2414 if (rpipe_ready)
2415 wa_xfer_delayed_run(rpipe);
2416 }
2417
2418 return;
2419
2420error_bad_seg:
2421 spin_unlock_irqrestore(&xfer->lock, flags);
2422 wa_urb_dequeue(wa, xfer->urb, -ENOENT);
2423 if (printk_ratelimit())
2424 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2425 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2426 dev_err(dev, "DTI: URB max acceptable errors "
2427 "exceeded, resetting device\n");
2428 wa_reset_all(wa);
2429 }
2430 return;
2431
2432segment_aborted:
2433 /* nothing to do, as the aborter did the completion */
2434 spin_unlock_irqrestore(&xfer->lock, flags);
2435}
2436
2437/*
2438 * Process an isochronous packet status message
2439 *
2440 * inbound transfers: need to schedule a buf_in_urb read
2441 */
2442static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2443{
2444 struct device *dev = &wa->usb_iface->dev;
2445 struct wa_xfer_packet_status_hwaiso *packet_status;
2446 struct wa_xfer_packet_status_len_hwaiso *status_array;
2447 struct wa_xfer *xfer;
2448 unsigned long flags;
2449 struct wa_seg *seg;
2450 struct wa_rpipe *rpipe;
2451 unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
2452 unsigned first_frame_index = 0, rpipe_ready = 0;
2453 int expected_size;
2454
2455 /* We have a xfer result buffer; check it */
2456 dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2457 urb->actual_length, urb->transfer_buffer);
2458 packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2459 if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2460 dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2461 packet_status->bPacketType);
2462 goto error_parse_buffer;
2463 }
2464 xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2465 if (xfer == NULL) {
2466 dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2467 wa->dti_isoc_xfer_in_progress);
2468 goto error_parse_buffer;
2469 }
2470 spin_lock_irqsave(&xfer->lock, flags);
2471 if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2472 goto error_bad_seg;
2473 seg = xfer->seg[wa->dti_isoc_xfer_seg];
2474 rpipe = xfer->ep->hcpriv;
2475 expected_size = sizeof(*packet_status) +
2476 (sizeof(packet_status->PacketStatus[0]) *
2477 seg->isoc_frame_count);
2478 if (urb->actual_length != expected_size) {
2479 dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
2480 urb->actual_length, expected_size);
2481 goto error_bad_seg;
2482 }
2483 if (le16_to_cpu(packet_status->wLength) != expected_size) {
2484 dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2485 le16_to_cpu(packet_status->wLength));
2486 goto error_bad_seg;
2487 }
2488 /* write isoc packet status and lengths back to the xfer urb. */
2489 status_array = packet_status->PacketStatus;
2490 xfer->urb->start_frame =
2491 wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
2492 for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2493 struct usb_iso_packet_descriptor *iso_frame_desc =
2494 xfer->urb->iso_frame_desc;
2495 const int xfer_frame_index =
2496 seg->isoc_frame_offset + seg_index;
2497
2498 iso_frame_desc[xfer_frame_index].status =
2499 wa_xfer_status_to_errno(
2500 le16_to_cpu(status_array[seg_index].PacketStatus));
2501 iso_frame_desc[xfer_frame_index].actual_length =
2502 le16_to_cpu(status_array[seg_index].PacketLength);
2503 /* track the number of frames successfully transferred. */
2504 if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
2505 /* save the starting frame index for buf_in_urb. */
2506 if (!data_frame_count)
2507 first_frame_index = seg_index;
2508 ++data_frame_count;
2509 }
2510 }
2511
2512 if (xfer->is_inbound && data_frame_count) {
2513 int result, total_frames_read = 0, urb_index = 0;
2514 struct urb *buf_in_urb;
2515
2516 /* IN data phase: read to buffer */
2517 seg->status = WA_SEG_DTI_PENDING;
2518
2519 /* start with the first frame with data. */
2520 seg->isoc_frame_index = first_frame_index;
2521 /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
2522 do {
2523 int urb_frame_index, urb_frame_count;
2524 struct usb_iso_packet_descriptor *iso_frame_desc;
2525
2526 buf_in_urb = &(wa->buf_in_urbs[urb_index]);
2527 urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
2528 buf_in_urb, xfer, seg);
2529 /* advance frame index to start of next read URB. */
2530 seg->isoc_frame_index += urb_frame_count;
2531 total_frames_read += urb_frame_count;
2532
2533 ++(wa->active_buf_in_urbs);
2534 result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2535
2536 /* skip 0-byte frames. */
2537 urb_frame_index =
2538 seg->isoc_frame_offset + seg->isoc_frame_index;
2539 iso_frame_desc =
2540 &(xfer->urb->iso_frame_desc[urb_frame_index]);
2541 while ((seg->isoc_frame_index <
2542 seg->isoc_frame_count) &&
2543 (iso_frame_desc->actual_length == 0)) {
2544 ++(seg->isoc_frame_index);
2545 ++iso_frame_desc;
2546 }
2547 ++urb_index;
2548
2549 } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
2550 && (seg->isoc_frame_index <
2551 seg->isoc_frame_count));
2552
2553 if (result < 0) {
2554 --(wa->active_buf_in_urbs);
2555			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
2556				result);
2557 wa_reset_all(wa);
2558 } else if (data_frame_count > total_frames_read)
2559 /* If we need to read more frames, set DTI busy. */
2560 dti_busy = 1;
2561 } else {
2562 /* OUT transfer or no more IN data, complete it -- */
2563 rpipe_ready = rpipe_avail_inc(rpipe);
2564 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2565 }
2566 spin_unlock_irqrestore(&xfer->lock, flags);
2567 if (dti_busy)
2568 wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
2569 else
2570 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2571 if (done)
2572 wa_xfer_completion(xfer);
2573 if (rpipe_ready)
2574 wa_xfer_delayed_run(rpipe);
2575 wa_xfer_put(xfer);
2576 return dti_busy;
2577
2578error_bad_seg:
2579 spin_unlock_irqrestore(&xfer->lock, flags);
2580 wa_xfer_put(xfer);
2581error_parse_buffer:
2582 return dti_busy;
2583}
2584
2585/*
2586 * Callback for the IN data phase
2587 *
2588 * If successful, transition state; otherwise, take note of the
2589 * error, mark this segment done and try completion.
2590 *
2591 * Note we don't access the transfer until we are sure it hasn't been
2592 * cancelled (ECONNRESET, ENOENT), in which case seg->xfer could
2593 * already be gone.
2594 */
2595static void wa_buf_in_cb(struct urb *urb)
2596{
2597 struct wa_seg *seg = urb->context;
2598 struct wa_xfer *xfer = seg->xfer;
2599 struct wahc *wa;
2600 struct device *dev;
2601 struct wa_rpipe *rpipe;
2602 unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
2603 unsigned long flags;
2604 int resubmit_dti = 0, active_buf_in_urbs;
2605 u8 done = 0;
2606
2607 /* free the sg if it was used. */
2608 kfree(urb->sg);
2609 urb->sg = NULL;
2610
2611 spin_lock_irqsave(&xfer->lock, flags);
2612 wa = xfer->wa;
2613 dev = &wa->usb_iface->dev;
2614 --(wa->active_buf_in_urbs);
2615 active_buf_in_urbs = wa->active_buf_in_urbs;
2616 rpipe = xfer->ep->hcpriv;
2617
2618 if (usb_pipeisoc(xfer->urb->pipe)) {
2619 struct usb_iso_packet_descriptor *iso_frame_desc =
2620 xfer->urb->iso_frame_desc;
2621 int seg_index;
2622
2623 /*
2624 * Find the next isoc frame with data and count how many
2625 * frames with data remain.
2626 */
2627 seg_index = seg->isoc_frame_index;
2628 while (seg_index < seg->isoc_frame_count) {
2629 const int urb_frame_index =
2630 seg->isoc_frame_offset + seg_index;
2631
2632 if (iso_frame_desc[urb_frame_index].actual_length > 0) {
2633 /* save the index of the next frame with data */
2634 if (!isoc_data_frame_count)
2635 seg->isoc_frame_index = seg_index;
2636 ++isoc_data_frame_count;
2637 }
2638 ++seg_index;
2639 }
2640 }
2641 spin_unlock_irqrestore(&xfer->lock, flags);
2642
2643 switch (urb->status) {
2644 case 0:
2645 spin_lock_irqsave(&xfer->lock, flags);
2646
2647 seg->result += urb->actual_length;
2648 if (isoc_data_frame_count > 0) {
2649 int result, urb_frame_count;
2650
2651 /* submit a read URB for the next frame with data. */
2652 urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
2653 xfer, seg);
2654 /* advance index to start of next read URB. */
2655 seg->isoc_frame_index += urb_frame_count;
2656 ++(wa->active_buf_in_urbs);
2657 result = usb_submit_urb(urb, GFP_ATOMIC);
2658 if (result < 0) {
2659 --(wa->active_buf_in_urbs);
2660				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)\n",
2661					result);
2662 wa_reset_all(wa);
2663 }
2664 /*
2665 * If we are in this callback and
2666 * isoc_data_frame_count > 0, it means that the dti_urb
2667 * submission was delayed in wa_dti_cb. Once
2668 * we submit the last buf_in_urb, we can submit the
2669 * delayed dti_urb.
2670 */
2671 resubmit_dti = (isoc_data_frame_count ==
2672 urb_frame_count);
2673 } else if (active_buf_in_urbs == 0) {
2674 dev_dbg(dev,
2675 "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
2676 xfer, wa_xfer_id(xfer), seg->index,
2677 seg->result);
2678 rpipe_ready = rpipe_avail_inc(rpipe);
2679 done = __wa_xfer_mark_seg_as_done(xfer, seg,
2680 WA_SEG_DONE);
2681 }
2682 spin_unlock_irqrestore(&xfer->lock, flags);
2683 if (done)
2684 wa_xfer_completion(xfer);
2685 if (rpipe_ready)
2686 wa_xfer_delayed_run(rpipe);
2687 break;
2688 case -ECONNRESET: /* URB unlinked; no need to do anything */
2689 case -ENOENT: /* as it was done by the who unlinked us */
2690 break;
2691 default: /* Other errors ... */
2692 /*
2693 * Error on data buf read. Only resubmit DTI if it hasn't
2694 * already been done by previously hitting this error or by a
2695 * successful completion of the previous buf_in_urb.
2696 */
2697 resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
2698 spin_lock_irqsave(&xfer->lock, flags);
2699 if (printk_ratelimit())
2700 dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
2701 xfer, wa_xfer_id(xfer), seg->index,
2702 urb->status);
2703 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2704			    EDC_ERROR_TIMEFRAME)) {
2705 dev_err(dev, "DTO: URB max acceptable errors "
2706 "exceeded, resetting device\n");
2707 wa_reset_all(wa);
2708 }
2709 seg->result = urb->status;
2710 rpipe_ready = rpipe_avail_inc(rpipe);
2711 if (active_buf_in_urbs == 0)
2712 done = __wa_xfer_mark_seg_as_done(xfer, seg,
2713 WA_SEG_ERROR);
2714 else
2715 __wa_xfer_abort(xfer);
2716 spin_unlock_irqrestore(&xfer->lock, flags);
2717 if (done)
2718 wa_xfer_completion(xfer);
2719 if (rpipe_ready)
2720 wa_xfer_delayed_run(rpipe);
2721 }
2722
2723 if (resubmit_dti) {
2724 int result;
2725
2726 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2727
2728 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2729 if (result < 0) {
2730 dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2731 result);
2732 wa_reset_all(wa);
2733 }
2734 }
2735}
2736
2737/*
2738 * Handle an incoming transfer result buffer
2739 *
2740 * Given a transfer result buffer, it completes the transfer (possibly
2741 * scheduling a buffer-in read) and then resubmits the DTI URB for a
2742 * new transfer result read.
2743 *
2744 *
2745 * The xfer_result DTI URB state machine
2746 *
2747 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2748 *
2749 * We start in OFF mode; the first xfer_result notification [through
2750 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2751 * read.
2752 *
2753 * We receive a buffer -- if it is not a xfer_result, we complain and
2754 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
2755 * request accounting. If it is an IN segment, we move to RBI and post
2756 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
2757 * repost the DTI-URB and move to RXR state. If there was no IN
2758 * segment, it will repost the DTI-URB.
2759 *
2760 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
2761 * errors) in the URBs.
2762 */
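/*
 * Rough sketch of those transitions (informal, for orientation only):
 *
 *	OFF --first transfer notification--> RXR
 *	RXR --xfer_result, IN data pending--> RBI
 *	RXR --xfer_result, OUT or no data--> RXR (repost DTI-URB)
 *	RBI --buf-in read complete--> RXR (repost DTI-URB)
 *	any --ENOENT/ESHUTDOWN or too many errors--> OFF
 */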
2763static void wa_dti_cb(struct urb *urb)
2764{
2765 int result, dti_busy = 0;
2766 struct wahc *wa = urb->context;
2767 struct device *dev = &wa->usb_iface->dev;
2768 u32 xfer_id;
2769 u8 usb_status;
2770
2771 BUG_ON(wa->dti_urb != urb);
2772 switch (wa->dti_urb->status) {
2773 case 0:
2774 if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2775 struct wa_xfer_result *xfer_result;
2776 struct wa_xfer *xfer;
2777
2778 /* We have a xfer result buffer; check it */
2779 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2780 urb->actual_length, urb->transfer_buffer);
2781 if (urb->actual_length != sizeof(*xfer_result)) {
2782 dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2783 urb->actual_length,
2784 sizeof(*xfer_result));
2785 break;
2786 }
2787 xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2788 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2789 dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2790 xfer_result->hdr.bLength);
2791 break;
2792 }
2793 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2794 dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2795 xfer_result->hdr.bNotifyType);
2796 break;
2797 }
2798 xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2799 usb_status = xfer_result->bTransferStatus & 0x3f;
2800 if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
2801 /* taken care of already */
2802 dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
2803 __func__, xfer_id,
2804 xfer_result->bTransferSegment & 0x7f);
2805 break;
2806 }
2807 xfer = wa_xfer_get_by_id(wa, xfer_id);
2808 if (xfer == NULL) {
2809 /* FIXME: transaction not found. */
2810 dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2811 xfer_id, usb_status);
2812 break;
2813 }
2814 wa_xfer_result_chew(wa, xfer, xfer_result);
2815 wa_xfer_put(xfer);
2816 } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2817 dti_busy = wa_process_iso_packet_status(wa, urb);
2818 } else {
2819 dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2820 wa->dti_state);
2821 }
2822 break;
2823 case -ENOENT: /* (we killed the URB)...so, no broadcast */
2824 case -ESHUTDOWN: /* going away! */
2825 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2826 goto out;
2827 default:
2828 /* Unknown error */
2829 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2830 EDC_ERROR_TIMEFRAME)) {
2831 dev_err(dev, "DTI: URB max acceptable errors "
2832 "exceeded, resetting device\n");
2833 wa_reset_all(wa);
2834 goto out;
2835 }
2836 if (printk_ratelimit())
2837 dev_err(dev, "DTI: URB error %d\n", urb->status);
2838 break;
2839 }
2840
2841 /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
2842 if (!dti_busy) {
2843 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2844 if (result < 0) {
2845 dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2846 result);
2847 wa_reset_all(wa);
2848 }
2849 }
2850out:
2851 return;
2852}
2853
2854/*
2855 * Initialize the DTI URB for reading transfer result notifications and also
2856 * the buffer-in URBs, for reading buffers. Then we just submit the DTI URB.
2857 */
2858int wa_dti_start(struct wahc *wa)
2859{
2860 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2861 struct device *dev = &wa->usb_iface->dev;
2862 int result = -ENOMEM, index;
2863
2864 if (wa->dti_urb != NULL) /* DTI URB already started */
2865 goto out;
2866
2867 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2868 if (wa->dti_urb == NULL) {
2869 dev_err(dev, "Can't allocate DTI URB\n");
2870 goto error_dti_urb_alloc;
2871 }
2872 usb_fill_bulk_urb(
2873 wa->dti_urb, wa->usb_dev,
2874 usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
2875 wa->dti_buf, wa->dti_buf_size,
2876 wa_dti_cb, wa);
2877
2878 /* init the buf in URBs */
2879 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
2880 usb_fill_bulk_urb(
2881 &(wa->buf_in_urbs[index]), wa->usb_dev,
2882 usb_rcvbulkpipe(wa->usb_dev,
2883 0x80 | dti_epd->bEndpointAddress),
2884 NULL, 0, wa_buf_in_cb, wa);
2885 }
2886 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2887 if (result < 0) {
2888 dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
2889 result);
2890 goto error_dti_urb_submit;
2891 }
2892out:
2893 return 0;
2894
2895error_dti_urb_submit:
2896 usb_put_urb(wa->dti_urb);
2897 wa->dti_urb = NULL;
2898error_dti_urb_alloc:
2899 return result;
2900}
2901EXPORT_SYMBOL_GPL(wa_dti_start);
2902/*
2903 * Transfer complete notification
2904 *
2905 * Called from the notif.c code. We get a notification on EP2 saying
2906 * that some endpoint has some transfer result data available. We are
2907 * about to read it.
2908 *
2909 * To speed things up, we always keep a URB posted reading on the DTI
2910 * endpoint; we don't really set it up and start it until the first
2911 * xfer complete notification arrives, which is what we do here.
2912 *
2913 * Follow up in wa_dti_cb(), as that's where the whole state
2914 * machine starts.
2915 *
2916 * @wa shall be referenced
2917 */
2918void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2919{
2920 struct device *dev = &wa->usb_iface->dev;
2921 struct wa_notif_xfer *notif_xfer;
2922 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2923
2924 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2925 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2926
2927 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2928 /* FIXME: hardcoded limitation, adapt */
2929 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2930 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2931 goto error;
2932 }
2933
2934 /* attempt to start the DTI ep processing. */
2935 if (wa_dti_start(wa) < 0)
2936 goto error;
2937
2938 return;
2939
2940error:
2941 wa_reset_all(wa);
2942}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * WUSB Wire Adapter
4 * Data transfer and URB enqueing
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * How transfers work: get a buffer, break it up in segments (segment
10 * size is a multiple of the maxpacket size). For each segment issue a
11 * segment request (struct wa_xfer_*), then send the data buffer if
12 * out or nothing if in (all over the DTO endpoint).
13 *
14 * For each submitted segment request, a notification will come over
15 * the NEP endpoint and a transfer result (struct xfer_result) will
16 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
17 * data coming (inbound transfer), schedule a read and handle it.
18 *
19 * Sounds simple, it is a pain to implement.
20 *
21 *
22 * ENTRY POINTS
23 *
24 * FIXME
25 *
26 * LIFE CYCLE / STATE DIAGRAM
27 *
28 * FIXME
29 *
30 * THIS CODE IS DISGUSTING
31 *
32 * Warned you are; it's my second try and still not happy with it.
33 *
34 * NOTES:
35 *
36 * - No iso
37 *
38 * - Supports DMA xfers, control, bulk and maybe interrupt
39 *
40 * - Does not recycle unused rpipes
41 *
42 * An rpipe is assigned to an endpoint the first time it is used,
43 * and then it's there, assigned, until the endpoint is disabled
44 * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the
45 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
46 * (should be a mutex).
47 *
48 * Two methods it could be done:
49 *
50 * (a) set up a timer every time an rpipe's use count drops to 1
51 * (which means unused) or when a transfer ends. Reset the
52 * timer when a xfer is queued. If the timer expires, release
53 * the rpipe [see rpipe_ep_disable()].
54 *
55 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
56 * when none are found go over the list, check their endpoint
57 * and their activity record (if no last-xfer-done-ts in the
58 * last x seconds) take it
59 *
60 * However, due to the fact that we have a set of limited
61 * resources (max-segments-at-the-same-time per xfer,
62 * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end
63 * we are going to have to rebuild all this based on an scheduler,
64 * to where we have a list of transactions to do and based on the
65 * availability of the different required components (blocks,
66 * rpipes, segment slots, etc), we go scheduling them. Painful.
67 */
68#include <linux/spinlock.h>
69#include <linux/slab.h>
70#include <linux/hash.h>
71#include <linux/ratelimit.h>
72#include <linux/export.h>
73#include <linux/scatterlist.h>
74
75#include "wa-hc.h"
76#include "wusbhc.h"
77
78enum {
79 /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
80 WA_SEGS_MAX = 128,
81};
82
83enum wa_seg_status {
84 WA_SEG_NOTREADY,
85 WA_SEG_READY,
86 WA_SEG_DELAYED,
87 WA_SEG_SUBMITTED,
88 WA_SEG_PENDING,
89 WA_SEG_DTI_PENDING,
90 WA_SEG_DONE,
91 WA_SEG_ERROR,
92 WA_SEG_ABORTED,
93};
94
95static void wa_xfer_delayed_run(struct wa_rpipe *);
96static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
97
98/*
99 * Life cycle governed by 'struct urb' (the refcount of the struct is
100 * that of the 'struct urb' and usb_free_urb() would free the whole
101 * struct).
102 */
103struct wa_seg {
104 struct urb tr_urb; /* transfer request urb. */
105 struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
106 struct urb *dto_urb; /* for data output. */
107 struct list_head list_node; /* for rpipe->req_list */
108 struct wa_xfer *xfer; /* out xfer */
109 u8 index; /* which segment we are */
110 int isoc_frame_count; /* number of isoc frames in this segment. */
111 int isoc_frame_offset; /* starting frame offset in the xfer URB. */
112 /* Isoc frame that the current transfer buffer corresponds to. */
113 int isoc_frame_index;
114 int isoc_size; /* size of all isoc frames sent by this seg. */
115 enum wa_seg_status status;
116 ssize_t result; /* bytes xfered or error */
117 struct wa_xfer_hdr xfer_hdr;
118};
119
120static inline void wa_seg_init(struct wa_seg *seg)
121{
122 usb_init_urb(&seg->tr_urb);
123
124 /* set the remaining memory to 0. */
125 memset(((void *)seg) + sizeof(seg->tr_urb), 0,
126 sizeof(*seg) - sizeof(seg->tr_urb));
127}
128
129/*
130 * Protected by xfer->lock
131 *
132 */
133struct wa_xfer {
134 struct kref refcnt;
135 struct list_head list_node;
136 spinlock_t lock;
137 u32 id;
138
139 struct wahc *wa; /* Wire adapter we are plugged to */
140 struct usb_host_endpoint *ep;
141 struct urb *urb; /* URB we are transferring for */
142 struct wa_seg **seg; /* transfer segments */
143 u8 segs, segs_submitted, segs_done;
144 unsigned is_inbound:1;
145 unsigned is_dma:1;
146 size_t seg_size;
147 int result;
148
149 gfp_t gfp; /* allocation mask */
150
151 struct wusb_dev *wusb_dev; /* for activity timestamps */
152};
153
154static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
155 struct wa_seg *seg, int curr_iso_frame);
156static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
157 int starting_index, enum wa_seg_status status);
158
159static inline void wa_xfer_init(struct wa_xfer *xfer)
160{
161 kref_init(&xfer->refcnt);
162 INIT_LIST_HEAD(&xfer->list_node);
163 spin_lock_init(&xfer->lock);
164}
165
166/*
167 * Destroy a transfer structure
168 *
169 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
170 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
171 */
172static void wa_xfer_destroy(struct kref *_xfer)
173{
174 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
175 if (xfer->seg) {
176 unsigned cnt;
177 for (cnt = 0; cnt < xfer->segs; cnt++) {
178 struct wa_seg *seg = xfer->seg[cnt];
179 if (seg) {
180 usb_free_urb(seg->isoc_pack_desc_urb);
181 if (seg->dto_urb) {
182 kfree(seg->dto_urb->sg);
183 usb_free_urb(seg->dto_urb);
184 }
185 usb_free_urb(&seg->tr_urb);
186 }
187 }
188 kfree(xfer->seg);
189 }
190 kfree(xfer);
191}
192
193static void wa_xfer_get(struct wa_xfer *xfer)
194{
195 kref_get(&xfer->refcnt);
196}
197
198static void wa_xfer_put(struct wa_xfer *xfer)
199{
200 kref_put(&xfer->refcnt, wa_xfer_destroy);
201}
202
203/*
204 * Try to get exclusive access to the DTO endpoint resource. Return true
205 * if successful.
206 */
207static inline int __wa_dto_try_get(struct wahc *wa)
208{
209 return (test_and_set_bit(0, &wa->dto_in_use) == 0);
210}
211
212/* Release the DTO endpoint resource. */
213static inline void __wa_dto_put(struct wahc *wa)
214{
215 clear_bit_unlock(0, &wa->dto_in_use);
216}
217
218/* Service RPIPEs that are waiting on the DTO resource. */
219static void wa_check_for_delayed_rpipes(struct wahc *wa)
220{
221 unsigned long flags;
222 int dto_waiting = 0;
223 struct wa_rpipe *rpipe;
224
225 spin_lock_irqsave(&wa->rpipe_lock, flags);
226 while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
227 rpipe = list_first_entry(&wa->rpipe_delayed_list,
228 struct wa_rpipe, list_node);
229 __wa_xfer_delayed_run(rpipe, &dto_waiting);
230 /* remove this RPIPE from the list if it is not waiting. */
231 if (!dto_waiting) {
232 pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
233 __func__,
234 le16_to_cpu(rpipe->descr.wRPipeIndex));
235 list_del_init(&rpipe->list_node);
236 }
237 }
238 spin_unlock_irqrestore(&wa->rpipe_lock, flags);
239}
240
241/* add this RPIPE to the end of the delayed RPIPE list. */
242static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
243{
244 unsigned long flags;
245
246 spin_lock_irqsave(&wa->rpipe_lock, flags);
247 /* add rpipe to the list if it is not already on it. */
248 if (list_empty(&rpipe->list_node)) {
249 pr_debug("%s: adding RPIPE %d to the delayed list.\n",
250 __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
251 list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
252 }
253 spin_unlock_irqrestore(&wa->rpipe_lock, flags);
254}
255
256/*
257 * xfer is referenced
258 *
259 * xfer->lock has to be unlocked
260 *
261 * We take xfer->lock for setting the result; this is a barrier
262 * against drivers/usb/core/hcd.c:unlink1() being called after we call
263 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
264 * reference to the transfer.
265 */
266static void wa_xfer_giveback(struct wa_xfer *xfer)
267{
268 unsigned long flags;
269
270 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
271 list_del_init(&xfer->list_node);
272 usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
273 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
274 /* FIXME: segmentation broken -- kills DWA */
275 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
276 wa_put(xfer->wa);
277 wa_xfer_put(xfer);
278}
279
280/*
281 * xfer is referenced
282 *
283 * xfer->lock has to be unlocked
284 */
285static void wa_xfer_completion(struct wa_xfer *xfer)
286{
287 if (xfer->wusb_dev)
288 wusb_dev_put(xfer->wusb_dev);
289 rpipe_put(xfer->ep->hcpriv);
290 wa_xfer_giveback(xfer);
291}
292
293/*
294 * Initialize a transfer's ID
295 *
296 * We need to use a sequential number; if we use the pointer or the
297 * hash of the pointer, it can repeat over sequential transfers and
298 * then it will confuse the HWA....wonder why in hell they put a 32
299 * bit handle in there then.
300 */
301static void wa_xfer_id_init(struct wa_xfer *xfer)
302{
303 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
304}
305
306/* Return the xfer's ID. */
307static inline u32 wa_xfer_id(struct wa_xfer *xfer)
308{
309 return xfer->id;
310}
311
312/* Return the xfer's ID in transport format (little endian). */
313static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
314{
315 return cpu_to_le32(xfer->id);
316}
317
318/*
319 * If transfer is done, wrap it up and return true
320 *
321 * xfer->lock has to be locked
322 */
323static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
324{
325 struct device *dev = &xfer->wa->usb_iface->dev;
326 unsigned result, cnt;
327 struct wa_seg *seg;
328 struct urb *urb = xfer->urb;
329 unsigned found_short = 0;
330
331 result = xfer->segs_done == xfer->segs_submitted;
332 if (result == 0)
333 goto out;
334 urb->actual_length = 0;
335 for (cnt = 0; cnt < xfer->segs; cnt++) {
336 seg = xfer->seg[cnt];
337 switch (seg->status) {
338 case WA_SEG_DONE:
339 if (found_short && seg->result > 0) {
340 dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
341 xfer, wa_xfer_id(xfer), cnt,
342 seg->result);
343 urb->status = -EINVAL;
344 goto out;
345 }
346 urb->actual_length += seg->result;
347 if (!(usb_pipeisoc(xfer->urb->pipe))
348 && seg->result < xfer->seg_size
349 && cnt != xfer->segs-1)
350 found_short = 1;
351 dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
352 "result %zu urb->actual_length %d\n",
353 xfer, wa_xfer_id(xfer), seg->index, found_short,
354 seg->result, urb->actual_length);
355 break;
356 case WA_SEG_ERROR:
357 xfer->result = seg->result;
358 dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
359 xfer, wa_xfer_id(xfer), seg->index, seg->result,
360 seg->result);
361 goto out;
362 case WA_SEG_ABORTED:
363 xfer->result = seg->result;
364 dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
365 xfer, wa_xfer_id(xfer), seg->index, seg->result,
366 seg->result);
367 goto out;
368 default:
369 dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
370 xfer, wa_xfer_id(xfer), cnt, seg->status);
371 xfer->result = -EINVAL;
372 goto out;
373 }
374 }
375 xfer->result = 0;
376out:
377 return result;
378}
379
380/*
381 * Mark the given segment as done. Return true if this completes the xfer.
382 * This should only be called for segs that have been submitted to an RPIPE.
383 * Delayed segs are not marked as submitted so they do not need to be marked
384 * as done when cleaning up.
385 *
386 * xfer->lock has to be locked
387 */
388static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
389 struct wa_seg *seg, enum wa_seg_status status)
390{
391 seg->status = status;
392 xfer->segs_done++;
393
394 /* check for done. */
395 return __wa_xfer_is_done(xfer);
396}
397
398/*
399 * Search for a transfer list ID on the HCD's URB list
400 *
401 * For 32 bit architectures, we use the pointer itself; for 64 bits, a
402 * 32-bit hash of the pointer.
403 *
404 * @returns NULL if not found.
405 */
406static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
407{
408 unsigned long flags;
409 struct wa_xfer *xfer_itr;
410 spin_lock_irqsave(&wa->xfer_list_lock, flags);
411 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
412 if (id == xfer_itr->id) {
413 wa_xfer_get(xfer_itr);
414 goto out;
415 }
416 }
417 xfer_itr = NULL;
418out:
419 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
420 return xfer_itr;
421}
422
423struct wa_xfer_abort_buffer {
424 struct urb urb;
425 struct wahc *wa;
426 struct wa_xfer_abort cmd;
427};
428
429static void __wa_xfer_abort_cb(struct urb *urb)
430{
431 struct wa_xfer_abort_buffer *b = urb->context;
432 struct wahc *wa = b->wa;
433
434 /*
435 * If the abort request URB failed, then the HWA did not get the abort
436 * command. Forcibly clean up the xfer without waiting for a Transfer
437 * Result from the HWA.
438 */
439 if (urb->status < 0) {
440 struct wa_xfer *xfer;
441 struct device *dev = &wa->usb_iface->dev;
442
443 xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
444 dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
445 __func__, urb->status);
446 if (xfer) {
447 unsigned long flags;
448 int done, seg_index = 0;
449 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
450
451 dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
452 __func__, xfer, wa_xfer_id(xfer));
453 spin_lock_irqsave(&xfer->lock, flags);
454 /* skip done segs. */
455 while (seg_index < xfer->segs) {
456 struct wa_seg *seg = xfer->seg[seg_index];
457
458 if ((seg->status == WA_SEG_DONE) ||
459 (seg->status == WA_SEG_ERROR)) {
460 ++seg_index;
461 } else {
462 break;
463 }
464 }
465 /* mark remaining segs as aborted. */
466 wa_complete_remaining_xfer_segs(xfer, seg_index,
467 WA_SEG_ABORTED);
468 done = __wa_xfer_is_done(xfer);
469 spin_unlock_irqrestore(&xfer->lock, flags);
470 if (done)
471 wa_xfer_completion(xfer);
472 wa_xfer_delayed_run(rpipe);
473 wa_xfer_put(xfer);
474 } else {
475 dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
476 __func__, le32_to_cpu(b->cmd.dwTransferID));
477 }
478 }
479
480 wa_put(wa); /* taken in __wa_xfer_abort */
481 usb_put_urb(&b->urb);
482}
483
484/*
485 * Aborts an ongoing transaction
486 *
487 * Assumes the transfer is referenced and locked and in a submitted
488 * state (mainly that there is an endpoint/rpipe assigned).
489 *
490 * The callback (see above) does nothing but freeing up the data by
491 * putting the URB. Because the URB is allocated at the head of the
492 * struct, the whole space we allocated is kfreed. *
493 */
494static int __wa_xfer_abort(struct wa_xfer *xfer)
495{
496 int result = -ENOMEM;
497 struct device *dev = &xfer->wa->usb_iface->dev;
498 struct wa_xfer_abort_buffer *b;
499 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
500
501 b = kmalloc(sizeof(*b), GFP_ATOMIC);
502 if (b == NULL)
503 goto error_kmalloc;
504 b->cmd.bLength = sizeof(b->cmd);
505 b->cmd.bRequestType = WA_XFER_ABORT;
506 b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
507 b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
508 b->wa = wa_get(xfer->wa);
509
510 usb_init_urb(&b->urb);
511 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
512 usb_sndbulkpipe(xfer->wa->usb_dev,
513 xfer->wa->dto_epd->bEndpointAddress),
514 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
515 result = usb_submit_urb(&b->urb, GFP_ATOMIC);
516 if (result < 0)
517 goto error_submit;
518 return result; /* callback frees! */
519
520
521error_submit:
522 wa_put(xfer->wa);
523 if (printk_ratelimit())
524 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
525 xfer, result);
526 kfree(b);
527error_kmalloc:
528 return result;
529
530}
531
532/*
533 * Calculate the number of isoc frames starting from isoc_frame_offset
534 * that will fit a in transfer segment.
535 */
536static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
537 int isoc_frame_offset, int *total_size)
538{
539 int segment_size = 0, frame_count = 0;
540 int index = isoc_frame_offset;
541 struct usb_iso_packet_descriptor *iso_frame_desc =
542 xfer->urb->iso_frame_desc;
543
544 while ((index < xfer->urb->number_of_packets)
545 && ((segment_size + iso_frame_desc[index].length)
546 <= xfer->seg_size)) {
547 /*
548 * For Alereon HWA devices, only include an isoc frame in an
549 * out segment if it is physically contiguous with the previous
550 * frame. This is required because those devices expect
551 * the isoc frames to be sent as a single USB transaction as
552 * opposed to one transaction per frame with standard HWA.
553 */
554 if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
555 && (xfer->is_inbound == 0)
556 && (index > isoc_frame_offset)
557 && ((iso_frame_desc[index - 1].offset +
558 iso_frame_desc[index - 1].length) !=
559 iso_frame_desc[index].offset))
560 break;
561
562 /* this frame fits. count it. */
563 ++frame_count;
564 segment_size += iso_frame_desc[index].length;
565
566 /* move to the next isoc frame. */
567 ++index;
568 }
569
570 *total_size = segment_size;
571 return frame_count;
572}
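
/*
 * Worked example (hypothetical numbers): with xfer->seg_size = 3072
 * and six isoc frames of 1024 bytes each, three frames fit per
 * segment, so the first call returns 3 with *total_size = 3072.
 * Under the Alereon concat quirk, a physical gap between OUT frames
 * 1 and 2 would instead stop the count at 2 with *total_size = 2048.
 */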
573
574/*
575 * Compute segment count/size and the transfer request header size.
576 * @returns < 0 on error, transfer segment request size if ok
577 */
578static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
579 enum wa_xfer_type *pxfer_type)
580{
581 ssize_t result;
582 struct device *dev = &xfer->wa->usb_iface->dev;
583 size_t maxpktsize;
584 struct urb *urb = xfer->urb;
585 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
586
587 switch (rpipe->descr.bmAttribute & 0x3) {
588 case USB_ENDPOINT_XFER_CONTROL:
589 *pxfer_type = WA_XFER_TYPE_CTL;
590 result = sizeof(struct wa_xfer_ctl);
591 break;
592 case USB_ENDPOINT_XFER_INT:
593 case USB_ENDPOINT_XFER_BULK:
594 *pxfer_type = WA_XFER_TYPE_BI;
595 result = sizeof(struct wa_xfer_bi);
596 break;
597 case USB_ENDPOINT_XFER_ISOC:
598 *pxfer_type = WA_XFER_TYPE_ISO;
599 result = sizeof(struct wa_xfer_hwaiso);
600 break;
601 default:
602 /* never happens */
603 BUG();
604 result = -EINVAL; /* shut gcc up */
605 }
606 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
607 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
608
609 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
610 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
611 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
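/*
 * e.g. (hypothetical values): wBlocks = 4 and bRPipeBlockSize = 8
 * give seg_size = 4 * (1 << 7) = 512 bytes, rounded down below to a
 * multiple of maxpktsize.
 */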
612 /* Compute the segment size and make sure it is a multiple of
613 * the maxpktsize (WUSB1.0[8.3.3.1]); this is not really much of
614 * a check (FIXME) */
615 if (xfer->seg_size < maxpktsize) {
616 dev_err(dev,
617 "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
618 xfer->seg_size, maxpktsize);
619 result = -EINVAL;
620 goto error;
621 }
622 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
623 if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
624 int index = 0;
625
626 xfer->segs = 0;
627 /*
628 * loop over urb->number_of_packets to determine how many
629 * xfer segments will be needed to send the isoc frames.
630 */
631 while (index < urb->number_of_packets) {
632 int seg_size; /* don't care. */
633 index += __wa_seg_calculate_isoc_frame_count(xfer,
634 index, &seg_size);
635 ++xfer->segs;
636 }
637 } else {
638 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
639 xfer->seg_size);
640 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
641 xfer->segs = 1;
642 }
643
644 if (xfer->segs > WA_SEGS_MAX) {
645 dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
646 (int)xfer->segs,
647 WA_SEGS_MAX);
648 result = -EINVAL;
649 goto error;
650 }
651error:
652 return result;
653}
654
655static void __wa_setup_isoc_packet_descr(
656 struct wa_xfer_packet_info_hwaiso *packet_desc,
657 struct wa_xfer *xfer,
658 struct wa_seg *seg) {
659 struct usb_iso_packet_descriptor *iso_frame_desc =
660 xfer->urb->iso_frame_desc;
661 int frame_index;
662
663 /* populate isoc packet descriptor. */
664 packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
665 packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
666 (sizeof(packet_desc->PacketLength[0]) *
667 seg->isoc_frame_count));
668 for (frame_index = 0; frame_index < seg->isoc_frame_count;
669 ++frame_index) {
670 int offset_index = frame_index + seg->isoc_frame_offset;
671 packet_desc->PacketLength[frame_index] =
672 cpu_to_le16(iso_frame_desc[offset_index].length);
673 }
674}
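
/*
 * Resulting descriptor layout on the wire (per the structures used
 * here):
 *
 *	bPacketType   WA_XFER_ISO_PACKET_INFO
 *	wLength       sizeof(*packet_desc) + 2 bytes per frame
 *	PacketLength  one le16 frame length per isoc frame in the segment
 */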
675
676
677/* Fill in the common request header and xfer-type specific data. */
678static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
679 struct wa_xfer_hdr *xfer_hdr0,
680 enum wa_xfer_type xfer_type,
681 size_t xfer_hdr_size)
682{
683 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
684 struct wa_seg *seg = xfer->seg[0];
685
686 xfer_hdr0 = &seg->xfer_hdr;
687 xfer_hdr0->bLength = xfer_hdr_size;
688 xfer_hdr0->bRequestType = xfer_type;
689 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
690 xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
691 xfer_hdr0->bTransferSegment = 0;
692 switch (xfer_type) {
693 case WA_XFER_TYPE_CTL: {
694 struct wa_xfer_ctl *xfer_ctl =
695 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
696 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
697 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
698 sizeof(xfer_ctl->baSetupData));
699 break;
700 }
701 case WA_XFER_TYPE_BI:
702 break;
703 case WA_XFER_TYPE_ISO: {
704 struct wa_xfer_hwaiso *xfer_iso =
705 container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
706 struct wa_xfer_packet_info_hwaiso *packet_desc =
707 ((void *)xfer_iso) + xfer_hdr_size;
708
709 /* populate the isoc section of the transfer request. */
710 xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
711 /* populate isoc packet descriptor. */
712 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
713 break;
714 }
715 default:
716 BUG();
717 }
718}
719
720/*
721 * Callback for the OUT data phase of the segment request
722 *
723 * Check wa_seg_tr_cb(); most comments also apply here because this
724 * function does almost the same thing and they work closely
725 * together.
726 *
727 * If the seg request has failed but this DTO phase has succeeded,
728 * wa_seg_tr_cb() has already failed the segment and moved the
729 * status to WA_SEG_ERROR, so this will go through 'case 0' and
730 * effectively do nothing.
731 */
732static void wa_seg_dto_cb(struct urb *urb)
733{
734 struct wa_seg *seg = urb->context;
735 struct wa_xfer *xfer = seg->xfer;
736 struct wahc *wa;
737 struct device *dev;
738 struct wa_rpipe *rpipe;
739 unsigned long flags;
740 unsigned rpipe_ready = 0;
741 int data_send_done = 1, release_dto = 0, holding_dto = 0;
742 u8 done = 0;
743 int result;
744
745 /* free the sg if it was used. */
746 kfree(urb->sg);
747 urb->sg = NULL;
748
749 spin_lock_irqsave(&xfer->lock, flags);
750 wa = xfer->wa;
751 dev = &wa->usb_iface->dev;
752 if (usb_pipeisoc(xfer->urb->pipe)) {
753 /* Alereon HWA sends all isoc frames in a single transfer. */
754 if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
755 seg->isoc_frame_index += seg->isoc_frame_count;
756 else
757 seg->isoc_frame_index += 1;
758 if (seg->isoc_frame_index < seg->isoc_frame_count) {
759 data_send_done = 0;
760 holding_dto = 1; /* checked in error cases. */
761 /*
762 * if this is the last isoc frame of the segment, we
763 * can release DTO after sending this frame.
764 */
765 if ((seg->isoc_frame_index + 1) >=
766 seg->isoc_frame_count)
767 release_dto = 1;
768 }
769 dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
770 wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
771 holding_dto, release_dto);
772 }
773 spin_unlock_irqrestore(&xfer->lock, flags);
774
775 switch (urb->status) {
776 case 0:
777 spin_lock_irqsave(&xfer->lock, flags);
778 seg->result += urb->actual_length;
779 if (data_send_done) {
780 dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
781 wa_xfer_id(xfer), seg->index, seg->result);
782 if (seg->status < WA_SEG_PENDING)
783 seg->status = WA_SEG_PENDING;
784 } else {
785 /* should only hit this for isoc xfers. */
786 /*
787 * Populate the dto URB with the next isoc frame buffer,
788 * send the URB and release DTO if we no longer need it.
789 */
790 __wa_populate_dto_urb_isoc(xfer, seg,
791 seg->isoc_frame_offset + seg->isoc_frame_index);
792
793 /* resubmit the URB with the next isoc frame. */
794 /* take a ref on resubmit. */
795 wa_xfer_get(xfer);
796 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
797 if (result < 0) {
798 dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
799 wa_xfer_id(xfer), seg->index, result);
800 spin_unlock_irqrestore(&xfer->lock, flags);
801 goto error_dto_submit;
802 }
803 }
804 spin_unlock_irqrestore(&xfer->lock, flags);
805 if (release_dto) {
806 __wa_dto_put(wa);
807 wa_check_for_delayed_rpipes(wa);
808 }
809 break;
810 case -ECONNRESET: /* URB unlinked; no need to do anything */
811 case -ENOENT: /* as it was done by whoever unlinked us */
812 if (holding_dto) {
813 __wa_dto_put(wa);
814 wa_check_for_delayed_rpipes(wa);
815 }
816 break;
817 default: /* Other errors ... */
818 dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
819 wa_xfer_id(xfer), seg->index, urb->status);
820 goto error_default;
821 }
822
823 /* taken when this URB was submitted. */
824 wa_xfer_put(xfer);
825 return;
826
827error_dto_submit:
828 /* taken on resubmit attempt. */
829 wa_xfer_put(xfer);
830error_default:
831 spin_lock_irqsave(&xfer->lock, flags);
832 rpipe = xfer->ep->hcpriv;
833 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
834 EDC_ERROR_TIMEFRAME)) {
835 dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
836 wa_reset_all(wa);
837 }
838 if (seg->status != WA_SEG_ERROR) {
839 seg->result = urb->status;
840 __wa_xfer_abort(xfer);
841 rpipe_ready = rpipe_avail_inc(rpipe);
842 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
843 }
844 spin_unlock_irqrestore(&xfer->lock, flags);
845 if (holding_dto) {
846 __wa_dto_put(wa);
847 wa_check_for_delayed_rpipes(wa);
848 }
849 if (done)
850 wa_xfer_completion(xfer);
851 if (rpipe_ready)
852 wa_xfer_delayed_run(rpipe);
853 /* taken when this URB was submitted. */
854 wa_xfer_put(xfer);
855}
856
857/*
858 * Callback for the isoc packet descriptor phase of the segment request
859 *
860 * Check wa_seg_tr_cb(); most comments also apply here because this
861 * function does almost the same thing and they work closely
862 * together.
863 *
864 * If the seg request has failed but this phase has succeeded,
865 * wa_seg_tr_cb() has already failed the segment and moved the
866 * status to WA_SEG_ERROR, so this will go through 'case 0' and
867 * effectively do nothing.
868 */
869static void wa_seg_iso_pack_desc_cb(struct urb *urb)
870{
871 struct wa_seg *seg = urb->context;
872 struct wa_xfer *xfer = seg->xfer;
873 struct wahc *wa;
874 struct device *dev;
875 struct wa_rpipe *rpipe;
876 unsigned long flags;
877 unsigned rpipe_ready = 0;
878 u8 done = 0;
879
880 switch (urb->status) {
881 case 0:
882 spin_lock_irqsave(&xfer->lock, flags);
883 wa = xfer->wa;
884 dev = &wa->usb_iface->dev;
885 dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
886 wa_xfer_id(xfer), seg->index);
887 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
888 seg->status = WA_SEG_PENDING;
889 spin_unlock_irqrestore(&xfer->lock, flags);
890 break;
891 case -ECONNRESET: /* URB unlinked; no need to do anything */
892 case -ENOENT: /* as it was done by whoever unlinked us */
893 break;
894 default: /* Other errors ... */
895 spin_lock_irqsave(&xfer->lock, flags);
896 wa = xfer->wa;
897 dev = &wa->usb_iface->dev;
898 rpipe = xfer->ep->hcpriv;
899 pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
900 wa_xfer_id(xfer), seg->index, urb->status);
901 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
902 EDC_ERROR_TIMEFRAME)) {
903 dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
904 wa_reset_all(wa);
905 }
906 if (seg->status != WA_SEG_ERROR) {
907 usb_unlink_urb(seg->dto_urb);
908 seg->result = urb->status;
909 __wa_xfer_abort(xfer);
910 rpipe_ready = rpipe_avail_inc(rpipe);
911 done = __wa_xfer_mark_seg_as_done(xfer, seg,
912 WA_SEG_ERROR);
913 }
914 spin_unlock_irqrestore(&xfer->lock, flags);
915 if (done)
916 wa_xfer_completion(xfer);
917 if (rpipe_ready)
918 wa_xfer_delayed_run(rpipe);
919 }
920 /* taken when this URB was submitted. */
921 wa_xfer_put(xfer);
922}
923
924/*
925 * Callback for the segment request
926 *
927 * On success, transition the segment state (unless it already
928 * transitioned or the transfer is outbound); otherwise, note the
929 * error, mark this segment done and try completion.
930 *
931 * Note we don't access the segment until we are sure the transfer
932 * hasn't been cancelled (ECONNRESET, ENOENT), in which case
933 * seg->xfer could already be gone.
934 *
935 * We have to check before setting the status to WA_SEG_PENDING
936 * because sometimes the xfer result callback arrives before this
937 * callback (geeeeeeze), so it might happen that we are already in
938 * another state. As well, we don't set it if the transfer is not inbound,
939 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
940 * finishes.
941 */
942static void wa_seg_tr_cb(struct urb *urb)
943{
944 struct wa_seg *seg = urb->context;
945 struct wa_xfer *xfer = seg->xfer;
946 struct wahc *wa;
947 struct device *dev;
948 struct wa_rpipe *rpipe;
949 unsigned long flags;
950 unsigned rpipe_ready;
951 u8 done = 0;
952
953 switch (urb->status) {
954 case 0:
955 spin_lock_irqsave(&xfer->lock, flags);
956 wa = xfer->wa;
957 dev = &wa->usb_iface->dev;
958 dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
959 xfer, wa_xfer_id(xfer), seg->index);
960 if (xfer->is_inbound &&
961 seg->status < WA_SEG_PENDING &&
962 !(usb_pipeisoc(xfer->urb->pipe)))
963 seg->status = WA_SEG_PENDING;
964 spin_unlock_irqrestore(&xfer->lock, flags);
965 break;
966 case -ECONNRESET: /* URB unlinked; no need to do anything */
967 case -ENOENT: /* as it was done by whoever unlinked us */
968 break;
969 default: /* Other errors ... */
970 spin_lock_irqsave(&xfer->lock, flags);
971 wa = xfer->wa;
972 dev = &wa->usb_iface->dev;
973 rpipe = xfer->ep->hcpriv;
974 if (printk_ratelimit())
975 dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
976 xfer, wa_xfer_id(xfer), seg->index,
977 urb->status);
978 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
979 EDC_ERROR_TIMEFRAME)) {
980 dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
982 wa_reset_all(wa);
983 }
984 usb_unlink_urb(seg->isoc_pack_desc_urb);
985 usb_unlink_urb(seg->dto_urb);
986 seg->result = urb->status;
987 __wa_xfer_abort(xfer);
988 rpipe_ready = rpipe_avail_inc(rpipe);
989 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
990 spin_unlock_irqrestore(&xfer->lock, flags);
991 if (done)
992 wa_xfer_completion(xfer);
993 if (rpipe_ready)
994 wa_xfer_delayed_run(rpipe);
995 }
996 /* taken when this URB was submitted. */
997 wa_xfer_put(xfer);
998}
999
1000/*
1001 * Allocate an SG list to store bytes_to_transfer bytes and copy the
1002 * subset of the in_sg that matches the buffer subset
1003 * we are about to transfer.
1004 */
1005static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
1006 const unsigned int bytes_transferred,
1007 const unsigned int bytes_to_transfer, int *out_num_sgs)
1008{
1009 struct scatterlist *out_sg;
1010 unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
1011 nents;
1012 struct scatterlist *current_xfer_sg = in_sg;
1013 struct scatterlist *current_seg_sg, *last_seg_sg;
1014
1015 /* skip previously transferred pages. */
1016 while ((current_xfer_sg) &&
1017 (bytes_processed < bytes_transferred)) {
1018 bytes_processed += current_xfer_sg->length;
1019
1020 /* advance the sg if current segment starts on or past the
1021 next page. */
1022 if (bytes_processed <= bytes_transferred)
1023 current_xfer_sg = sg_next(current_xfer_sg);
1024 }
1025
1026 /* the data for the current segment starts in current_xfer_sg.
1027 calculate the offset. */
1028 if (bytes_processed > bytes_transferred) {
1029 offset_into_current_page_data = current_xfer_sg->length -
1030 (bytes_processed - bytes_transferred);
1031 }
1032
1033 /* calculate the number of pages needed by this segment. */
1034 nents = DIV_ROUND_UP((bytes_to_transfer +
1035 offset_into_current_page_data +
1036 current_xfer_sg->offset),
1037 PAGE_SIZE);
1038
1039 out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
1040 if (out_sg) {
1041 sg_init_table(out_sg, nents);
1042
1043 /* copy the portion of the incoming SG that correlates to the
1044 * data to be transferred by this segment to the segment SG. */
1045 last_seg_sg = current_seg_sg = out_sg;
1046 bytes_processed = 0;
1047
1048 /* reset nents and calculate the actual number of sg entries
1049 needed. */
1050 nents = 0;
1051 while ((bytes_processed < bytes_to_transfer) &&
1052 current_seg_sg && current_xfer_sg) {
1053 unsigned int page_len = min((current_xfer_sg->length -
1054 offset_into_current_page_data),
1055 (bytes_to_transfer - bytes_processed));
1056
1057 sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
1058 page_len,
1059 current_xfer_sg->offset +
1060 offset_into_current_page_data);
1061
1062 bytes_processed += page_len;
1063
1064 last_seg_sg = current_seg_sg;
1065 current_seg_sg = sg_next(current_seg_sg);
1066 current_xfer_sg = sg_next(current_xfer_sg);
1067
1068 /* only the first page may require additional offset. */
1069 offset_into_current_page_data = 0;
1070 nents++;
1071 }
1072
1073 /* update num_sgs and terminate the list since we may have
1074 * concatenated pages. */
1075 sg_mark_end(last_seg_sg);
1076 *out_num_sgs = nents;
1077 }
1078
1079 return out_sg;
1080}
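
/*
 * Worked example (hypothetical): for an in_sg describing three full
 * pages (4096 bytes each, offset 0), bytes_transferred = 6000 and
 * bytes_to_transfer = 3000 skip page 0, start 1904 bytes into page 1,
 * take 2192 bytes there plus 808 bytes from page 2, and return an
 * out_sg with *out_num_sgs = 2.
 */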
1081
1082/*
1083 * Populate DMA buffer info for the isoc dto urb.
1084 */
1085static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
1086 struct wa_seg *seg, int curr_iso_frame)
1087{
1088 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1089 seg->dto_urb->sg = NULL;
1090 seg->dto_urb->num_sgs = 0;
1091 /* dto urb buffer address pulled from iso_frame_desc. */
1092 seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
1093 xfer->urb->iso_frame_desc[curr_iso_frame].offset;
1094 /* The Alereon HWA sends a single URB with all isoc segs. */
1095 if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
1096 seg->dto_urb->transfer_buffer_length = seg->isoc_size;
1097 else
1098 seg->dto_urb->transfer_buffer_length =
1099 xfer->urb->iso_frame_desc[curr_iso_frame].length;
1100}
1101
1102/*
1103 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
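 *
 * Three cases, mirroring how the HCD handed us the buffer: a mapped
 * DMA address (URB_NO_TRANSFER_DMA_MAP), a plain kernel virtual
 * buffer, or an SG list from which a per-segment subset is built by
 * wa_xfer_create_subset_sg().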
1104 */
1105static int __wa_populate_dto_urb(struct wa_xfer *xfer,
1106 struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
1107{
1108 int result = 0;
1109
1110 if (xfer->is_dma) {
1111 seg->dto_urb->transfer_dma =
1112 xfer->urb->transfer_dma + buf_itr_offset;
1113 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1114 seg->dto_urb->sg = NULL;
1115 seg->dto_urb->num_sgs = 0;
1116 } else {
1117 /* do buffer or SG processing. */
1118 seg->dto_urb->transfer_flags &=
1119 ~URB_NO_TRANSFER_DMA_MAP;
1120 /* this should always be 0 before a resubmit. */
1121 seg->dto_urb->num_mapped_sgs = 0;
1122
1123 if (xfer->urb->transfer_buffer) {
1124 seg->dto_urb->transfer_buffer =
1125 xfer->urb->transfer_buffer +
1126 buf_itr_offset;
1127 seg->dto_urb->sg = NULL;
1128 seg->dto_urb->num_sgs = 0;
1129 } else {
1130 seg->dto_urb->transfer_buffer = NULL;
1131
1132 /*
1133 * allocate an SG list to store seg_size bytes
1134 * and copy the subset of the xfer->urb->sg that
1135 * matches the buffer subset we are about to
1136 * read.
1137 */
1138 seg->dto_urb->sg = wa_xfer_create_subset_sg(
1139 xfer->urb->sg,
1140 buf_itr_offset, buf_itr_size,
1141 &(seg->dto_urb->num_sgs));
1142 if (!(seg->dto_urb->sg))
1143 result = -ENOMEM;
1144 }
1145 }
1146 seg->dto_urb->transfer_buffer_length = buf_itr_size;
1147
1148 return result;
1149}
1150
1151/*
1152 * Allocate the segs array and initialize each of them
1153 *
1154 * The segments are freed by wa_xfer_destroy() when the xfer use count
1155 * drops to zero; however, because each segment is given the same life
1156 * cycle as the USB URB it contains, it is actually freed by
1157 * usb_put_urb() on the contained USB URB (twisted, eh?).
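 *
 * Each segment is allocated as a single block laid out as (sketch):
 *
 *	struct wa_seg             (ends with the xfer_hdr member)
 *	xfer_hdr_size bytes       the full transfer request header,
 *	                          overlaying and extending xfer_hdr
 *	iso_pkt_descr_size bytes  the isoc packet descriptor (isoc only)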
1158 */
1159static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
1160{
1161 int result, cnt, isoc_frame_offset = 0;
1162 size_t alloc_size = sizeof(*xfer->seg[0])
1163 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
1164 struct usb_device *usb_dev = xfer->wa->usb_dev;
1165 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
1166 struct wa_seg *seg;
1167 size_t buf_itr, buf_size, buf_itr_size;
1168
1169 result = -ENOMEM;
1170 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
1171 if (xfer->seg == NULL)
1172 goto error_segs_kzalloc;
1173 buf_itr = 0;
1174 buf_size = xfer->urb->transfer_buffer_length;
1175 for (cnt = 0; cnt < xfer->segs; cnt++) {
1176 size_t iso_pkt_descr_size = 0;
1177 int seg_isoc_frame_count = 0, seg_isoc_size = 0;
1178
1179 /*
1180 * Adjust the size of the segment object to contain space for
1181 * the isoc packet descriptor buffer.
1182 */
1183 if (usb_pipeisoc(xfer->urb->pipe)) {
1184 seg_isoc_frame_count =
1185 __wa_seg_calculate_isoc_frame_count(xfer,
1186 isoc_frame_offset, &seg_isoc_size);
1187
1188 iso_pkt_descr_size =
1189 sizeof(struct wa_xfer_packet_info_hwaiso) +
1190 (seg_isoc_frame_count * sizeof(__le16));
1191 }
1192 result = -ENOMEM;
1193 seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
1194 GFP_ATOMIC);
1195 if (seg == NULL)
1196 goto error_seg_kmalloc;
1197 wa_seg_init(seg);
1198 seg->xfer = xfer;
1199 seg->index = cnt;
1200 usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
1201 usb_sndbulkpipe(usb_dev,
1202 dto_epd->bEndpointAddress),
1203 &seg->xfer_hdr, xfer_hdr_size,
1204 wa_seg_tr_cb, seg);
1205 buf_itr_size = min(buf_size, xfer->seg_size);
1206
1207 if (usb_pipeisoc(xfer->urb->pipe)) {
1208 seg->isoc_frame_count = seg_isoc_frame_count;
1209 seg->isoc_frame_offset = isoc_frame_offset;
1210 seg->isoc_size = seg_isoc_size;
1211 /* iso packet descriptor. */
1212 seg->isoc_pack_desc_urb =
1213 usb_alloc_urb(0, GFP_ATOMIC);
1214 if (seg->isoc_pack_desc_urb == NULL)
1215 goto error_iso_pack_desc_alloc;
1216 /*
1217 * The buffer for the isoc packet descriptor starts
1218 * after the transfer request header in the
1219 * segment object memory buffer.
1220 */
1221 usb_fill_bulk_urb(
1222 seg->isoc_pack_desc_urb, usb_dev,
1223 usb_sndbulkpipe(usb_dev,
1224 dto_epd->bEndpointAddress),
1225 (void *)(&seg->xfer_hdr) +
1226 xfer_hdr_size,
1227 iso_pkt_descr_size,
1228 wa_seg_iso_pack_desc_cb, seg);
1229
1230 /* adjust starting frame offset for next seg. */
1231 isoc_frame_offset += seg_isoc_frame_count;
1232 }
1233
1234 if (xfer->is_inbound == 0 && buf_size > 0) {
1235 /* outbound data. */
1236 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
1237 if (seg->dto_urb == NULL)
1238 goto error_dto_alloc;
1239 usb_fill_bulk_urb(
1240 seg->dto_urb, usb_dev,
1241 usb_sndbulkpipe(usb_dev,
1242 dto_epd->bEndpointAddress),
1243 NULL, 0, wa_seg_dto_cb, seg);
1244
1245 if (usb_pipeisoc(xfer->urb->pipe)) {
1246 /*
1247 * Fill in the xfer buffer information for the
1248 * first isoc frame. Subsequent frames in this
1249 * segment will be filled in and sent from the
1250 * DTO completion routine, if needed.
1251 */
1252 __wa_populate_dto_urb_isoc(xfer, seg,
1253 seg->isoc_frame_offset);
1254 } else {
1255 /* fill in the xfer buffer information. */
1256 result = __wa_populate_dto_urb(xfer, seg,
1257 buf_itr, buf_itr_size);
1258 if (result < 0)
1259 goto error_seg_outbound_populate;
1260
1261 buf_itr += buf_itr_size;
1262 buf_size -= buf_itr_size;
1263 }
1264 }
1265 seg->status = WA_SEG_READY;
1266 }
1267 return 0;
1268
1269 /*
1270 * Free the memory for the current segment which failed to init.
1271 * Use the fact that cnt is left where it failed. The remaining
1272 * segments will be cleaned up by wa_xfer_destroy.
1273 */
1274error_seg_outbound_populate:
1275 usb_free_urb(xfer->seg[cnt]->dto_urb);
1276error_dto_alloc:
1277 usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
1278error_iso_pack_desc_alloc:
1279 kfree(xfer->seg[cnt]);
1280 xfer->seg[cnt] = NULL;
1281error_seg_kmalloc:
1282error_segs_kzalloc:
1283 return result;
1284}
1285
1286/*
1287 * Allocates all the stuff needed to submit a transfer
1288 *
1289 * Breaks the whole data buffer in a list of segments, each one has a
1290 * structure allocated to it and linked in xfer->seg[index]
1291 *
1292 * FIXME: merge setup_segs() and the last part of this function, no
1293 * need to do two for loops when we could run everything in a
1294 * single one
1295 */
1296static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
1297{
1298 int result;
1299 struct device *dev = &xfer->wa->usb_iface->dev;
1300 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
1301 size_t xfer_hdr_size, cnt, transfer_size;
1302 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
1303
1304 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
1305 if (result < 0)
1306 goto error_setup_sizes;
1307 xfer_hdr_size = result;
1308 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
1309 if (result < 0) {
1310 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
1311 xfer, xfer->segs, result);
1312 goto error_setup_segs;
1313 }
1314 /* Fill the first header */
1315 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
1316 wa_xfer_id_init(xfer);
1317 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
1318
1319 /* Fill remaining headers */
1320 xfer_hdr = xfer_hdr0;
1321 if (xfer_type == WA_XFER_TYPE_ISO) {
1322 xfer_hdr0->dwTransferLength =
1323 cpu_to_le32(xfer->seg[0]->isoc_size);
1324 for (cnt = 1; cnt < xfer->segs; cnt++) {
1325 struct wa_xfer_packet_info_hwaiso *packet_desc;
1326 struct wa_seg *seg = xfer->seg[cnt];
1327 struct wa_xfer_hwaiso *xfer_iso;
1328
1329 xfer_hdr = &seg->xfer_hdr;
1330 xfer_iso = container_of(xfer_hdr,
1331 struct wa_xfer_hwaiso, hdr);
1332 packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
1333 /*
1334 * Copy values from the 0th header. Segment specific
1335 * values are set below.
1336 */
1337 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1338 xfer_hdr->bTransferSegment = cnt;
1339 xfer_hdr->dwTransferLength =
1340 cpu_to_le32(seg->isoc_size);
1341 xfer_iso->dwNumOfPackets =
1342 cpu_to_le32(seg->isoc_frame_count);
1343 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
1344 seg->status = WA_SEG_READY;
1345 }
1346 } else {
1347 transfer_size = urb->transfer_buffer_length;
1348 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1349 cpu_to_le32(xfer->seg_size) :
1350 cpu_to_le32(transfer_size);
1351 transfer_size -= xfer->seg_size;
1352 for (cnt = 1; cnt < xfer->segs; cnt++) {
1353 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1354 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1355 xfer_hdr->bTransferSegment = cnt;
1356 xfer_hdr->dwTransferLength =
1357 transfer_size > xfer->seg_size ?
1358 cpu_to_le32(xfer->seg_size)
1359 : cpu_to_le32(transfer_size);
1360 xfer->seg[cnt]->status = WA_SEG_READY;
1361 transfer_size -= xfer->seg_size;
1362 }
1363 }
1364 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
1365 result = 0;
1366error_setup_segs:
1367error_setup_sizes:
1368 return result;
1369}
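
/*
 * Example (hypothetical sizes): a 5000 byte bulk OUT with seg_size =
 * 3072 becomes two segments; header 0 gets dwTransferLength = 3072,
 * header 1 gets 1928 and bTransferSegment = 1 | 0x80 (last segment).
 */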
1370
1371/*
1372 * Submit one segment: the transfer request URB, plus the isoc packet
1373 * descriptor URB and the OUT data URB when present.
1374 * rpipe->seg_lock is held!
1375 */
1376static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
1377 struct wa_seg *seg, int *dto_done)
1378{
1379 int result;
1380
1381 /* default to done unless we encounter a multi-frame isoc segment. */
1382 *dto_done = 1;
1383
1384 /*
1385 * Take a ref for each segment urb so the xfer cannot disappear until
1386 * all of the callbacks run.
1387 */
1388 wa_xfer_get(xfer);
1389 /* submit the transfer request. */
1390 seg->status = WA_SEG_SUBMITTED;
1391 result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
1392 if (result < 0) {
1393 pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
1394 __func__, xfer, seg->index, result);
1395 wa_xfer_put(xfer);
1396 goto error_tr_submit;
1397 }
1398 /* submit the isoc packet descriptor if present. */
1399 if (seg->isoc_pack_desc_urb) {
1400 wa_xfer_get(xfer);
1401 result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1402 seg->isoc_frame_index = 0;
1403 if (result < 0) {
1404 pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1405 __func__, xfer, seg->index, result);
1406 wa_xfer_put(xfer);
1407 goto error_iso_pack_desc_submit;
1408 }
1409 }
1410 /* submit the out data if this is an out request. */
1411 if (seg->dto_urb) {
1412 struct wahc *wa = xfer->wa;
1413 wa_xfer_get(xfer);
1414 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
1415 if (result < 0) {
1416 pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
1417 __func__, xfer, seg->index, result);
1418 wa_xfer_put(xfer);
1419 goto error_dto_submit;
1420 }
1421 /*
1422 * If this segment contains more than one isoc frame, hold
1423 * onto the dto resource until we send all frames.
1424 * Only applies to non-Alereon devices.
1425 */
1426 if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
1427 && (seg->isoc_frame_count > 1))
1428 *dto_done = 0;
1429 }
1430 rpipe_avail_dec(rpipe);
1431 return 0;
1432
1433error_dto_submit:
1434 usb_unlink_urb(seg->isoc_pack_desc_urb);
1435error_iso_pack_desc_submit:
1436 usb_unlink_urb(&seg->tr_urb);
1437error_tr_submit:
1438 seg->status = WA_SEG_ERROR;
1439 seg->result = result;
1440 *dto_done = 1;
1441 return result;
1442}
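
/*
 * Error unwind note: a failed submit above unlinks the URBs that did
 * make it out (their callbacks still run and drop their xfer
 * references), marks the segment WA_SEG_ERROR and reports the DTO
 * resource as done so the caller can release it.
 */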
1443
1444/*
1445 * Execute more queued request segments until the maximum concurrent allowed.
1446 * Return true if the DTO resource was acquired and released.
1447 *
1448 * The ugly unlock/lock sequence on the error path is needed as the
1449 * xfer->lock normally nests the seg_lock and not vice versa.
1450 */
1451static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
1452{
1453 int result, dto_acquired = 0, dto_done = 0;
1454 struct device *dev = &rpipe->wa->usb_iface->dev;
1455 struct wa_seg *seg;
1456 struct wa_xfer *xfer;
1457 unsigned long flags;
1458
1459 *dto_waiting = 0;
1460
1461 spin_lock_irqsave(&rpipe->seg_lock, flags);
1462 while (atomic_read(&rpipe->segs_available) > 0
1463 && !list_empty(&rpipe->seg_list)
1464 && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
1465 seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
1466 list_node);
1467 list_del(&seg->list_node);
1468 xfer = seg->xfer;
1469 /*
1470 * Get a reference to the xfer in case the callbacks for the
1471 * URBs submitted by __wa_seg_submit attempt to complete
1472 * the xfer before this function completes.
1473 */
1474 wa_xfer_get(xfer);
1475 result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
1476 /* release the dto resource if this RPIPE is done with it. */
1477 if (dto_done)
1478 __wa_dto_put(rpipe->wa);
1479 dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
1480 xfer, wa_xfer_id(xfer), seg->index,
1481 atomic_read(&rpipe->segs_available), result);
1482 if (unlikely(result < 0)) {
1483 int done;
1484
1485 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1486 spin_lock_irqsave(&xfer->lock, flags);
1487 __wa_xfer_abort(xfer);
1488 /*
1489 * This seg was marked as submitted when it was put on
1490 * the RPIPE seg_list. Mark it done.
1491 */
1492 xfer->segs_done++;
1493 done = __wa_xfer_is_done(xfer);
1494 spin_unlock_irqrestore(&xfer->lock, flags);
1495 if (done)
1496 wa_xfer_completion(xfer);
1497 spin_lock_irqsave(&rpipe->seg_lock, flags);
1498 }
1499 wa_xfer_put(xfer);
1500 }
1501 /*
1502 * Mark this RPIPE as waiting if dto was not acquired, there are
1503 * delayed segs and no active transfers to wake us up later.
1504 */
1505 if (!dto_acquired && !list_empty(&rpipe->seg_list)
1506 && (atomic_read(&rpipe->segs_available) ==
1507 le16_to_cpu(rpipe->descr.wRequests)))
1508 *dto_waiting = 1;
1509
1510 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1511
1512 return dto_done;
1513}
1514
1515static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
1516{
1517 int dto_waiting;
1518 int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
1519
1520 /*
1521 * If this RPIPE is waiting on the DTO resource, add it to the tail of
1522 * the waiting list.
1523 * Otherwise, if the WA DTO resource was acquired and released by
1524 * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
1525 * DTO and failed during that time. Check the delayed list and process
1526 * any waiters. Start searching from the next RPIPE index.
1527 */
1528 if (dto_waiting)
1529 wa_add_delayed_rpipe(rpipe->wa, rpipe);
1530 else if (dto_done)
1531 wa_check_for_delayed_rpipes(rpipe->wa);
1532}
1533
1534/*
1535 * Submit all segments of @xfer, delaying those that cannot run now.
1536 * xfer->lock is taken
1537 *
1538 * On failure submitting we just stop submitting and return error;
1539 * wa_urb_enqueue_b() will execute the completion path
1540 */
1541static int __wa_xfer_submit(struct wa_xfer *xfer)
1542{
1543 int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1544 struct wahc *wa = xfer->wa;
1545 struct device *dev = &wa->usb_iface->dev;
1546 unsigned cnt;
1547 struct wa_seg *seg;
1548 unsigned long flags;
1549 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1550 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1551 u8 available;
1552 u8 empty;
1553
1554 spin_lock_irqsave(&wa->xfer_list_lock, flags);
1555 list_add_tail(&xfer->list_node, &wa->xfer_list);
1556 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1557
1558 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1559 result = 0;
1560 spin_lock_irqsave(&rpipe->seg_lock, flags);
1561 for (cnt = 0; cnt < xfer->segs; cnt++) {
1562 int delay_seg = 1;
1563
1564 available = atomic_read(&rpipe->segs_available);
1565 empty = list_empty(&rpipe->seg_list);
1566 seg = xfer->seg[cnt];
1567 if (available && empty) {
1568 /*
1569 * Only attempt to acquire DTO if we have a segment
1570 * to send.
1571 */
1572 dto_acquired = __wa_dto_try_get(rpipe->wa);
1573 if (dto_acquired) {
1574 delay_seg = 0;
1575 result = __wa_seg_submit(rpipe, xfer, seg,
1576 &dto_done);
1577 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1578 xfer, wa_xfer_id(xfer), cnt, available,
1579 empty);
1580 if (dto_done)
1581 __wa_dto_put(rpipe->wa);
1582
1583 if (result < 0) {
1584 __wa_xfer_abort(xfer);
1585 goto error_seg_submit;
1586 }
1587 }
1588 }
1589
1590 if (delay_seg) {
1591 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1592 xfer, wa_xfer_id(xfer), cnt, available, empty);
1593 seg->status = WA_SEG_DELAYED;
1594 list_add_tail(&seg->list_node, &rpipe->seg_list);
1595 }
1596 xfer->segs_submitted++;
1597 }
1598error_seg_submit:
1599 /*
1600 * Mark this RPIPE as waiting if dto was not acquired, there are
1601 * delayed segs and no active transfers to wake us up later.
1602 */
1603 if (!dto_acquired && !list_empty(&rpipe->seg_list)
1604 && (atomic_read(&rpipe->segs_available) ==
1605 le16_to_cpu(rpipe->descr.wRequests)))
1606 dto_waiting = 1;
1607 spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1608
1609 if (dto_waiting)
1610 wa_add_delayed_rpipe(rpipe->wa, rpipe);
1611 else if (dto_done)
1612 wa_check_for_delayed_rpipes(rpipe->wa);
1613
1614 return result;
1615}
1616
1617/*
1618 * Second part of a URB/transfer enqueuement
1619 *
1620 * Assumes this comes from wa_urb_enqueue() [maybe through
1621 * wa_urb_enqueue_run()]. At this point:
1622 *
1623 * xfer->wa filled and refcounted
1624 * xfer->ep filled with rpipe refcounted if
1625 * delayed == 0
1626 * xfer->urb filled and refcounted (this is the case when called
1627 * from wa_urb_enqueue() as we come from usb_submit_urb()
1628 * and when called by wa_urb_enqueue_run(), as we took an
1629 * extra ref dropped by _run() after we return).
1630 * xfer->gfp filled
1631 *
1632 * If we fail at __wa_xfer_submit(), then we just check if we are done
1633 * and if so, we run the completion procedure. However, if we are not
1634 * yet done, we do nothing and wait for the completion handlers from
1635 * the submitted URBs or from the xfer-result path to kick in. If xfer
1636 * result never kicks in, the xfer will timeout from the USB code and
1637 * dequeue() will be called.
1638 */
1639static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1640{
1641 int result;
1642 unsigned long flags;
1643 struct urb *urb = xfer->urb;
1644 struct wahc *wa = xfer->wa;
1645 struct wusbhc *wusbhc = wa->wusb;
1646 struct wusb_dev *wusb_dev;
1647 unsigned done;
1648
1649 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1650 if (result < 0) {
1651 pr_err("%s: error_rpipe_get\n", __func__);
1652 goto error_rpipe_get;
1653 }
1654 result = -ENODEV;
1655 /* FIXME: segmentation broken -- kills DWA */
1656 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
1657 if (urb->dev == NULL) {
1658 mutex_unlock(&wusbhc->mutex);
1659 pr_err("%s: error usb dev gone\n", __func__);
1660 goto error_dev_gone;
1661 }
1662 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1663 if (wusb_dev == NULL) {
1664 mutex_unlock(&wusbhc->mutex);
1665 dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
1666 __func__);
1667 goto error_dev_gone;
1668 }
1669 mutex_unlock(&wusbhc->mutex);
1670
1671 spin_lock_irqsave(&xfer->lock, flags);
1672 xfer->wusb_dev = wusb_dev;
1673 result = urb->status;
1674 if (urb->status != -EINPROGRESS) {
1675 dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
1676 goto error_dequeued;
1677 }
1678
1679 result = __wa_xfer_setup(xfer, urb);
1680 if (result < 0) {
1681 dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
1682 goto error_xfer_setup;
1683 }
1684 /*
1685 * Get a xfer reference since __wa_xfer_submit starts asynchronous
1686 * operations that may try to complete the xfer before this function
1687 * exits.
1688 */
1689 wa_xfer_get(xfer);
1690 result = __wa_xfer_submit(xfer);
1691 if (result < 0) {
1692 dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
1693 goto error_xfer_submit;
1694 }
1695 spin_unlock_irqrestore(&xfer->lock, flags);
1696 wa_xfer_put(xfer);
1697 return 0;
1698
1699 /*
1700 * this is basically wa_xfer_completion() broken up wa_xfer_giveback()
1701 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
1702 * setup().
1703 */
1704error_xfer_setup:
1705error_dequeued:
1706 spin_unlock_irqrestore(&xfer->lock, flags);
1707 /* FIXME: segmentation broken, kills DWA */
1708 if (wusb_dev)
1709 wusb_dev_put(wusb_dev);
1710error_dev_gone:
1711 rpipe_put(xfer->ep->hcpriv);
1712error_rpipe_get:
1713 xfer->result = result;
1714 return result;
1715
1716error_xfer_submit:
1717 done = __wa_xfer_is_done(xfer);
1718 xfer->result = result;
1719 spin_unlock_irqrestore(&xfer->lock, flags);
1720 if (done)
1721 wa_xfer_completion(xfer);
1722 wa_xfer_put(xfer);
1723 /* return success since the completion routine will run. */
1724 return 0;
1725}
1726
1727/*
1728 * Execute the delayed transfers in the Wire Adapter @wa
1729 *
1730 * We need to be careful here, as dequeue() could be called in the
1731 * middle. That's why we do the whole thing under the
1732 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1733 * and then checks the list; since we would be acquiring the locks in
1734 * inverse order, we move the delayed list to a separate list while locked and then
1735 * submit them without the list lock held.
1736 */
1737void wa_urb_enqueue_run(struct work_struct *ws)
1738{
1739 struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1740 struct wa_xfer *xfer, *next;
1741 struct urb *urb;
1742 LIST_HEAD(tmp_list);
1743
1744 /* Create a copy of the wa->xfer_delayed_list while holding the lock */
1745 spin_lock_irq(&wa->xfer_list_lock);
1746 list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1747 wa->xfer_delayed_list.prev);
1748 spin_unlock_irq(&wa->xfer_list_lock);
1749
1750 /*
1751 * enqueue from temp list without list lock held since wa_urb_enqueue_b
1752 * can take xfer->lock as well as lock mutexes.
1753 */
1754 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1755 list_del_init(&xfer->list_node);
1756
1757 urb = xfer->urb;
1758 if (wa_urb_enqueue_b(xfer) < 0)
1759 wa_xfer_giveback(xfer);
1760 usb_put_urb(urb); /* taken when queuing */
1761 }
1762}
1763EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1764
1765/*
1766 * Process the errored transfers on the Wire Adapter outside of interrupt.
1767 */
1768void wa_process_errored_transfers_run(struct work_struct *ws)
1769{
1770 struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1771 struct wa_xfer *xfer, *next;
1772 LIST_HEAD(tmp_list);
1773
1774 pr_info("%s: Run delayed STALL processing.\n", __func__);
1775
1776 /* Create a copy of the wa->xfer_errored_list while holding the lock */
1777 spin_lock_irq(&wa->xfer_list_lock);
1778 list_cut_position(&tmp_list, &wa->xfer_errored_list,
1779 wa->xfer_errored_list.prev);
1780 spin_unlock_irq(&wa->xfer_list_lock);
1781
1782 /*
1783 * run rpipe_clear_feature_stalled from temp list without list lock
1784 * held.
1785 */
1786 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1787 struct usb_host_endpoint *ep;
1788 unsigned long flags;
1789 struct wa_rpipe *rpipe;
1790
1791 spin_lock_irqsave(&xfer->lock, flags);
1792 ep = xfer->ep;
1793 rpipe = ep->hcpriv;
1794 spin_unlock_irqrestore(&xfer->lock, flags);
1795
1796 /* clear RPIPE feature stalled without holding a lock. */
1797 rpipe_clear_feature_stalled(wa, ep);
1798
1799 /* complete the xfer. This removes it from the tmp list. */
1800 wa_xfer_completion(xfer);
1801
1802 /* check for work. */
1803 wa_xfer_delayed_run(rpipe);
1804 }
1805}
1806EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1807
1808/*
1809 * Submit a transfer to the Wire Adapter in a delayed way
1810 *
1811 * The process of enqueuing involves possible sleeps() [see
1812 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1813 * in an atomic section, we defer the enqueue_b() call; otherwise we call it directly.
1814 *
1815 * @urb: We own a reference to it done by the HCI Linux USB stack that
1816 * will be given up by calling usb_hcd_giveback_urb() or by
1817 * returning error from this function -> ergo we don't have to
1818 * refcount it.
1819 */
1820int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1821 struct urb *urb, gfp_t gfp)
1822{
1823 int result;
1824 struct device *dev = &wa->usb_iface->dev;
1825 struct wa_xfer *xfer;
1826 unsigned long my_flags;
1827 unsigned cant_sleep = irqs_disabled() | in_atomic();
1828
1829 if ((urb->transfer_buffer == NULL)
1830 && (urb->sg == NULL)
1831 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1832 && urb->transfer_buffer_length != 0) {
1833 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1834 dump_stack();
1835 }
1836
1837 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1838 result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1839 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1840 if (result < 0)
1841 goto error_link_urb;
1842
1843 result = -ENOMEM;
1844 xfer = kzalloc(sizeof(*xfer), gfp);
1845 if (xfer == NULL)
1846 goto error_kmalloc;
1847
1848 result = -ENOENT;
1849 if (urb->status != -EINPROGRESS) /* cancelled */
1850 goto error_dequeued; /* before starting? */
1851 wa_xfer_init(xfer);
1852 xfer->wa = wa_get(wa);
1853 xfer->urb = urb;
1854 xfer->gfp = gfp;
1855 xfer->ep = ep;
1856 urb->hcpriv = xfer;
1857
1858 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1859 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1860 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1861 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1862 cant_sleep ? "deferred" : "inline");
1863
1864 if (cant_sleep) {
1865 usb_get_urb(urb);
1866 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1867 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1868 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1869 queue_work(wusbd, &wa->xfer_enqueue_work);
1870 } else {
1871 result = wa_urb_enqueue_b(xfer);
1872 if (result < 0) {
1873 /*
1874 * URB submit/enqueue failed. Clean up, return an
1875 * error and do not run the callback. This avoids
1876 * an infinite submit/complete loop.
1877 */
1878 dev_err(dev, "%s: URB enqueue failed: %d\n",
1879 __func__, result);
1880 wa_put(xfer->wa);
1881 wa_xfer_put(xfer);
1882 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1883 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1884 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1885 return result;
1886 }
1887 }
1888 return 0;
1889
1890error_dequeued:
1891 kfree(xfer);
1892error_kmalloc:
1893 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1894 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1895 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1896error_link_urb:
1897 return result;
1898}
1899EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1900
1901/*
1902 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1903 * handler] is called.
1904 *
1905 * Until a transfer goes successfully through wa_urb_enqueue() it
1906 * needs to be dequeued with the completion called; when stuck in the
1907 * delayed list or before wa_xfer_setup() runs, we do the completion here.
1908 *
1909 * not setup: if there is no hcpriv yet, enqueue has not yet had time
1910 * to set the xfer up. Because urb->status will be other
1911 * than -EINPROGRESS by then, enqueue() will catch that
1912 * and bail out.
1913 *
1914 * If the transfer has gone through setup, we just need to clean it
1915 * up. If it has gone through submit(), we have to abort it [with an
1916 * asynch request] and then make sure we cancel each segment.
1917 *
1918 */
1919int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1920{
1921 unsigned long flags, flags2;
1922 struct wa_xfer *xfer;
1923 struct wa_seg *seg;
1924 struct wa_rpipe *rpipe;
1925 unsigned cnt, done = 0, xfer_abort_pending;
1926 unsigned rpipe_ready = 0;
1927 int result;
1928
1929 /* check if it is safe to unlink. */
1930 spin_lock_irqsave(&wa->xfer_list_lock, flags);
1931 result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1932 if ((result == 0) && urb->hcpriv) {
1933 /*
1934 * Get a xfer ref to prevent a race with wa_xfer_giveback
1935 * cleaning up the xfer while we are working with it.
1936 */
1937 wa_xfer_get(urb->hcpriv);
1938 }
1939 spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1940 if (result)
1941 return result;
1942
1943 xfer = urb->hcpriv;
1944 if (xfer == NULL)
1945 return -ENOENT;
1946 spin_lock_irqsave(&xfer->lock, flags);
1947 pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1948 rpipe = xfer->ep->hcpriv;
1949 if (rpipe == NULL) {
1950 pr_debug("%s: xfer %p id 0x%08X has no RPIPE. Probably already aborted.\n",
1951 __func__, xfer, wa_xfer_id(xfer));
1953 result = -ENOENT;
1954 goto out_unlock;
1955 }
1956 /*
1957 * Check for done to avoid racing with wa_xfer_giveback and completing
1958 * twice.
1959 */
1960 if (__wa_xfer_is_done(xfer)) {
1961 pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1962 xfer, wa_xfer_id(xfer));
1963 result = -ENOENT;
1964 goto out_unlock;
1965 }
1966 /* Check the delayed list -> if there, release and complete */
1967 spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1968 if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1969 goto dequeue_delayed;
1970 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1971 if (xfer->seg == NULL) /* still hasn't reached */
1972 goto out_unlock; /* setup(), enqueue_b() completes */
1973 /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1974 xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1975 /*
1976 * grab the rpipe->seg_lock here to prevent racing with
1977 * __wa_xfer_delayed_run.
1978 */
1979 spin_lock(&rpipe->seg_lock);
1980 for (cnt = 0; cnt < xfer->segs; cnt++) {
1981 seg = xfer->seg[cnt];
1982 pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1983 __func__, wa_xfer_id(xfer), cnt, seg->status);
1984 switch (seg->status) {
1985 case WA_SEG_NOTREADY:
1986 case WA_SEG_READY:
1987 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1988 xfer, cnt, seg->status);
1989 WARN_ON(1);
1990 break;
1991 case WA_SEG_DELAYED:
1992 /*
1993 * delete from rpipe delayed list. If no segments on
1994 * this xfer have been submitted, __wa_xfer_is_done will
1995 * trigger a giveback below. Otherwise, the submitted
1996 * segments will be completed in the DTI interrupt.
1997 */
1998 seg->status = WA_SEG_ABORTED;
1999 seg->result = -ENOENT;
2000 list_del(&seg->list_node);
2001 xfer->segs_done++;
2002 break;
2003 case WA_SEG_DONE:
2004 case WA_SEG_ERROR:
2005 case WA_SEG_ABORTED:
2006 break;
2007 /*
2008 * The buf_in data for a segment in the
2009 * WA_SEG_DTI_PENDING state is actively being read.
2010 * Let wa_buf_in_cb handle it since it will be called
2011 * and will increment xfer->segs_done. Cleaning up
2012 * here could cause wa_buf_in_cb to access the xfer
2013 * after it has been completed/freed.
2014 */
2015 case WA_SEG_DTI_PENDING:
2016 break;
2017 /*
2018 * In the states below, the HWA device already knows
2019 * about the transfer. If an abort request was sent,
2020 * allow the HWA to process it and wait for the
2021 * results. Otherwise, the DTI state and seg completed
2022 * counts can get out of sync.
2023 */
2024 case WA_SEG_SUBMITTED:
2025 case WA_SEG_PENDING:
2026 /*
2027 * Check if the abort was successfully sent. This could
2028 * be false if the HWA has been removed but we haven't
2029 * gotten the disconnect notification yet.
2030 */
2031 if (!xfer_abort_pending) {
2032 seg->status = WA_SEG_ABORTED;
2033 rpipe_ready = rpipe_avail_inc(rpipe);
2034 xfer->segs_done++;
2035 }
2036 break;
2037 }
2038 }
2039 spin_unlock(&rpipe->seg_lock);
2040 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
2041 done = __wa_xfer_is_done(xfer);
2042 spin_unlock_irqrestore(&xfer->lock, flags);
2043 if (done)
2044 wa_xfer_completion(xfer);
2045 if (rpipe_ready)
2046 wa_xfer_delayed_run(rpipe);
2047 wa_xfer_put(xfer);
2048 return result;
2049
2050out_unlock:
2051 spin_unlock_irqrestore(&xfer->lock, flags);
2052 wa_xfer_put(xfer);
2053 return result;
2054
2055dequeue_delayed:
2056 list_del_init(&xfer->list_node);
2057 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
2058 xfer->result = urb->status;
2059 spin_unlock_irqrestore(&xfer->lock, flags);
2060 wa_xfer_giveback(xfer);
2061 wa_xfer_put(xfer);
2062 usb_put_urb(urb); /* we got a ref in enqueue() */
2063 return 0;
2064}
2065EXPORT_SYMBOL_GPL(wa_urb_dequeue);
2066
2067/*
2068 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
2069 * codes
2070 *
2071 * Positive errno values are internal inconsistencies and should be
2072 * flagged louder. Negative are to be passed up to the user in the
2073 * normal way.
2074 *
2075 * @status: USB WA status code -- high two bits are stripped.
2076 */
2077static int wa_xfer_status_to_errno(u8 status)
2078{
2079 int errno;
2080 u8 real_status = status;
2081 static int xlat[] = {
2082 [WA_XFER_STATUS_SUCCESS] = 0,
2083 [WA_XFER_STATUS_HALTED] = -EPIPE,
2084 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
2085 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
2086 [WA_XFER_RESERVED] = EINVAL,
2087 [WA_XFER_STATUS_NOT_FOUND] = 0,
2088 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2089 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
2090 [WA_XFER_STATUS_ABORTED] = -ENOENT,
2091 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
2092 [WA_XFER_INVALID_FORMAT] = EINVAL,
2093 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
2094 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
2095 };
2096 status &= 0x3f;
2097
2098 if (status == 0)
2099 return 0;
2100 if (status >= ARRAY_SIZE(xlat)) {
2101 printk_ratelimited(KERN_ERR "%s(): BUG? Unknown WA transfer status 0x%02x\n",
2103 __func__, real_status);
2104 return -EINVAL;
2105 }
2106 errno = xlat[status];
2107 if (unlikely(errno > 0)) {
2108 printk_ratelimited(KERN_ERR "%s(): BUG? Inconsistent WA status: 0x%02x\n",
2110 __func__, real_status);
2111 errno = -errno;
2112 }
2113 return errno;
2114}
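
/*
 * e.g. WA_XFER_STATUS_HALTED maps to -EPIPE; a reserved code such as
 * WA_XFER_RESERVED sits in the table as a positive value on purpose,
 * so the "inconsistent status" branch above flags it and negates it.
 */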
2115
2116/*
2117 * If a last segment flag and/or a transfer result error is encountered,
2118 * no other segment transfer results will be returned from the device.
2119 * Mark the remaining submitted or pending xfers as completed so that
2120 * the xfer will complete cleanly.
2121 *
2122 * xfer->lock must be held
2123 *
2124 */
2125static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2126 int starting_index, enum wa_seg_status status)
2127{
2128 int index;
2129 struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2130
2131 for (index = starting_index; index < xfer->segs_submitted; index++) {
2132 struct wa_seg *current_seg = xfer->seg[index];
2133
2134 BUG_ON(current_seg == NULL);
2135
2136 switch (current_seg->status) {
2137 case WA_SEG_SUBMITTED:
2138 case WA_SEG_PENDING:
2139 case WA_SEG_DTI_PENDING:
2140 rpipe_avail_inc(rpipe);
2141 /*
2142 * do not increment RPIPE avail for the WA_SEG_DELAYED case
2143 * since it has not been submitted to the RPIPE.
2144 */
2145 /* fall through */
2146 case WA_SEG_DELAYED:
2147 xfer->segs_done++;
2148 current_seg->status = status;
2149 break;
2150 case WA_SEG_ABORTED:
2151 break;
2152 default:
2153 WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2154 __func__, wa_xfer_id(xfer), index,
2155 current_seg->status);
2156 break;
2157 }
2158 }
2159}
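
/*
 * e.g. if segment 2 comes back with the last-segment flag set,
 * wa_xfer_result_chew() calls this with starting_index = 3; any
 * remaining submitted segments get their RPIPE credit back, are
 * marked WA_SEG_DONE and counted in segs_done so the xfer can
 * complete.
 */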
2160
2161/* Populate the given urb based on the current isoc transfer state. */
2162static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2163 struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2164{
2165 int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2166 int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2167 struct usb_iso_packet_descriptor *iso_frame_desc =
2168 xfer->urb->iso_frame_desc;
2169 const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2170 int next_frame_contiguous;
2171 struct usb_iso_packet_descriptor *iso_frame;
2172
2173 BUG_ON(buf_in_urb->status == -EINPROGRESS);
2174
2175 /*
2176 * If the current frame actual_length is contiguous with the next frame
2177 * and actual_length is a multiple of the DTI endpoint max packet size,
2178 * combine the current frame with the next frame in a single URB. This
2179 * reduces the number of URBs that must be submitted in that case.
2180 */
2181 seg_index = seg->isoc_frame_index;
2182 do {
2183 next_frame_contiguous = 0;
2184
2185 iso_frame = &iso_frame_desc[urb_frame_index];
2186 total_len += iso_frame->actual_length;
2187 ++urb_frame_index;
2188 ++seg_index;
2189
2190 if (seg_index < seg->isoc_frame_count) {
2191 struct usb_iso_packet_descriptor *next_iso_frame;
2192
2193 next_iso_frame = &iso_frame_desc[urb_frame_index];
2194
2195 if ((iso_frame->offset + iso_frame->actual_length) ==
2196 next_iso_frame->offset)
2197 next_frame_contiguous = 1;
2198 }
2199 } while (next_frame_contiguous
2200 && ((iso_frame->actual_length % dti_packet_size) == 0));
2201
2202 /* this should always be 0 before a resubmit. */
2203 buf_in_urb->num_mapped_sgs = 0;
2204 buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2205 iso_frame_desc[urb_start_frame].offset;
2206 buf_in_urb->transfer_buffer_length = total_len;
2207 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2208 buf_in_urb->transfer_buffer = NULL;
2209 buf_in_urb->sg = NULL;
2210 buf_in_urb->num_sgs = 0;
2211 buf_in_urb->context = seg;
2212
2213 /* return the number of frames included in this URB. */
2214 return seg_index - seg->isoc_frame_index;
2215}
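
/*
 * Coalescing example (hypothetical lengths): with a 512 byte DTI
 * maxpacket and three contiguous frames of actual_length 512, 512
 * and 300, a single URB reads all 1324 bytes and 3 is returned.  If
 * the first frame were 300 bytes, 300 % 512 != 0 stops coalescing
 * and the URB covers just that one frame.
 */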
2216
2217/* Populate the given urb based on the current transfer state. */
2218static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2219 unsigned int seg_idx, unsigned int bytes_transferred)
2220{
2221 int result = 0;
2222 struct wa_seg *seg = xfer->seg[seg_idx];
2223
2224 BUG_ON(buf_in_urb->status == -EINPROGRESS);
2225 /* this should always be 0 before a resubmit. */
2226 buf_in_urb->num_mapped_sgs = 0;
2227
2228 if (xfer->is_dma) {
2229 buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2230 + (seg_idx * xfer->seg_size);
2231 buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2232 buf_in_urb->transfer_buffer = NULL;
2233 buf_in_urb->sg = NULL;
2234 buf_in_urb->num_sgs = 0;
2235 } else {
2236 /* do buffer or SG processing. */
2237 buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2238
2239 if (xfer->urb->transfer_buffer) {
2240 buf_in_urb->transfer_buffer =
2241 xfer->urb->transfer_buffer
2242 + (seg_idx * xfer->seg_size);
2243 buf_in_urb->sg = NULL;
2244 buf_in_urb->num_sgs = 0;
2245 } else {
2246 /* allocate an SG list to store seg_size bytes
2247 and copy the subset of the xfer->urb->sg
2248 that matches the buffer subset we are
2249 about to read. */
2250 buf_in_urb->sg = wa_xfer_create_subset_sg(
2251 xfer->urb->sg,
2252 seg_idx * xfer->seg_size,
2253 bytes_transferred,
2254 &(buf_in_urb->num_sgs));
2255
2256 if (!(buf_in_urb->sg)) {
2257 buf_in_urb->num_sgs = 0;
2258 result = -ENOMEM;
2259 }
2260 buf_in_urb->transfer_buffer = NULL;
2261 }
2262 }
2263 buf_in_urb->transfer_buffer_length = bytes_transferred;
2264 buf_in_urb->context = seg;
2265
2266 return result;
2267}

/*
 * Process an xfer result completion message
 *
 * inbound transfers: need to schedule a buf_in_urb read
 *
 * FIXME: this function needs to be broken up into parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
		struct wa_xfer_result *xfer_result)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	unsigned int seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;
	unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
	struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);

	spin_lock_irqsave(&xfer->lock, flags);
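	/* only the low 7 bits of bTransferSegment carry the segment index. */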
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
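	/* bit 7 of bTransferStatus set means the segment failed. */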
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
			WA_SEG_ABORTED : WA_SEG_ERROR;
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	/*
	 * If the last segment bit is set, complete the remaining segments.
	 * When the current segment is completed, either in wa_buf_in_cb for
	 * transfers with data or below for no data, the xfer will complete.
	 */
	if (xfer_result->bTransferSegment & 0x80)
		wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
			WA_SEG_DONE);
	if (usb_pipeisoc(xfer->urb->pipe)
		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
		/* set up WA state to read the isoc packet status next. */
		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
		wa->dti_isoc_xfer_seg = seg_idx;
		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
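		/*
		 * wa_dti_cb() will route the next DTI buffer to
		 * wa_process_iso_packet_status().
		 */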
	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
			&& (bytes_transferred > 0)) {
		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
			bytes_transferred);
		if (result < 0)
			goto error_buf_in_populate;
		++(wa->active_buf_in_urbs);
		result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
		if (result < 0) {
			--(wa->active_buf_in_urbs);
			goto error_submit_buf_in;
		}
	} else {
		/* OUT data phase or no data, complete it -- */
		seg->result = bytes_transferred;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(buf_in_urb->sg);
	buf_in_urb->sg = NULL;
error_buf_in_populate:
	__wa_xfer_abort(xfer);
	seg->status = WA_SEG_ERROR;
error_complete:
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
	done = __wa_xfer_is_done(xfer);
	/*
	 * queue work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
		usb_endpoint_xfer_control(&xfer->ep->desc) &&
		done) {

		dev_info(dev, "Control EP stall. Queue delayed work.\n");
		spin_lock(&wa->xfer_list_lock);
		/* move xfer from xfer_list to xfer_errored_list. */
		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}

	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb, -ENOENT);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}

/*
 * Process an isochronous packet status message
 *
 * inbound transfers: need to schedule a buf_in_urb read
 */
static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_packet_status_hwaiso *packet_status;
	struct wa_xfer_packet_status_len_hwaiso *status_array;
	struct wa_xfer *xfer;
	unsigned long flags;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
	unsigned first_frame_index = 0, rpipe_ready = 0;
	int expected_size;

	/* We have an isoc packet status message; check it */
	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
		urb->actual_length, urb->transfer_buffer);
	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
			packet_status->bPacketType);
		goto error_parse_buffer;
	}
	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
	if (xfer == NULL) {
		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
			wa->dti_isoc_xfer_in_progress);
		goto error_parse_buffer;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[wa->dti_isoc_xfer_seg];
	rpipe = xfer->ep->hcpriv;
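	/*
	 * The status message is a fixed header followed by one
	 * PacketStatus entry per isoc frame in this segment.
	 */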
	expected_size = sizeof(*packet_status) +
			(sizeof(packet_status->PacketStatus[0]) *
			seg->isoc_frame_count);
	if (urb->actual_length != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
			urb->actual_length, expected_size);
		goto error_bad_seg;
	}
	if (le16_to_cpu(packet_status->wLength) != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
			le16_to_cpu(packet_status->wLength));
		goto error_bad_seg;
	}
	/* write isoc packet status and lengths back to the xfer urb. */
	status_array = packet_status->PacketStatus;
	xfer->urb->start_frame =
		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
		struct usb_iso_packet_descriptor *iso_frame_desc =
			xfer->urb->iso_frame_desc;
		const int xfer_frame_index =
			seg->isoc_frame_offset + seg_index;

		iso_frame_desc[xfer_frame_index].status =
			wa_xfer_status_to_errno(
			le16_to_cpu(status_array[seg_index].PacketStatus));
		iso_frame_desc[xfer_frame_index].actual_length =
			le16_to_cpu(status_array[seg_index].PacketLength);
		/* track the number of frames successfully transferred. */
		if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
			/* save the starting frame index for buf_in_urb. */
			if (!data_frame_count)
				first_frame_index = seg_index;
			++data_frame_count;
		}
	}

	if (xfer->is_inbound && data_frame_count) {
		int result, total_frames_read = 0, urb_index = 0;
		struct urb *buf_in_urb;

		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;

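		/*
		 * Each read URB below covers one run of contiguous
		 * non-empty frames [__wa_populate_buf_in_urb_isoc()];
		 * submitting several at once lets the reads for this
		 * segment proceed without waiting on each completion.
		 */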
		/* start with the first frame with data. */
		seg->isoc_frame_index = first_frame_index;
		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */
		do {
			int urb_frame_index, urb_frame_count;
			struct usb_iso_packet_descriptor *iso_frame_desc;

			buf_in_urb = &(wa->buf_in_urbs[urb_index]);
			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
				buf_in_urb, xfer, seg);
			/* advance frame index to start of next read URB. */
			seg->isoc_frame_index += urb_frame_count;
			total_frames_read += urb_frame_count;

			++(wa->active_buf_in_urbs);
			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);

			/* skip 0-byte frames. */
			urb_frame_index =
				seg->isoc_frame_offset + seg->isoc_frame_index;
			iso_frame_desc =
				&(xfer->urb->iso_frame_desc[urb_frame_index]);
			while ((seg->isoc_frame_index <
						seg->isoc_frame_count) &&
				(iso_frame_desc->actual_length == 0)) {
				++(seg->isoc_frame_index);
				++iso_frame_desc;
			}
			++urb_index;

		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
				&& (seg->isoc_frame_index <
					seg->isoc_frame_count));

		if (result < 0) {
			--(wa->active_buf_in_urbs);
			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
				result);
			wa_reset_all(wa);
		} else if (data_frame_count > total_frames_read) {
			/* If we need to read more frames, set DTI busy. */
			dti_busy = 1;
		}
	} else {
		/* OUT transfer or no more IN data, complete it -- */
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
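	/*
	 * If reads are still outstanding, hold off resubmitting the DTI
	 * URB; wa_buf_in_cb() resubmits it once the remaining frame data
	 * has been read.
	 */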
	if (dti_busy)
		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
	else
		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	wa_xfer_put(xfer);
	return dti_busy;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
error_parse_buffer:
	return dti_busy;
}

/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't dereference anything until we are sure the transfer
 * hasn't been cancelled (ECONNRESET, ENOENT), in which case seg->xfer
 * may already be gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
	unsigned long flags;
	int resubmit_dti = 0, active_buf_in_urbs;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	spin_lock_irqsave(&xfer->lock, flags);
	wa = xfer->wa;
	dev = &wa->usb_iface->dev;
	--(wa->active_buf_in_urbs);
	active_buf_in_urbs = wa->active_buf_in_urbs;
	rpipe = xfer->ep->hcpriv;

	if (usb_pipeisoc(xfer->urb->pipe)) {
		struct usb_iso_packet_descriptor *iso_frame_desc =
			xfer->urb->iso_frame_desc;
		int seg_index;

		/*
		 * Find the next isoc frame with data and count how many
		 * frames with data remain.
		 */
		seg_index = seg->isoc_frame_index;
		while (seg_index < seg->isoc_frame_count) {
			const int urb_frame_index =
				seg->isoc_frame_offset + seg_index;

			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
				/* save the index of the next frame with data */
				if (!isoc_data_frame_count)
					seg->isoc_frame_index = seg_index;
				++isoc_data_frame_count;
			}
			++seg_index;
		}
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);

		seg->result += urb->actual_length;
		if (isoc_data_frame_count > 0) {
			int result, urb_frame_count;

			/* submit a read URB for the next frame with data. */
			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
				xfer, seg);
			/* advance index to start of next read URB. */
			seg->isoc_frame_index += urb_frame_count;
			++(wa->active_buf_in_urbs);
			result = usb_submit_urb(urb, GFP_ATOMIC);
			if (result < 0) {
				--(wa->active_buf_in_urbs);
				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
					result);
				wa_reset_all(wa);
			}
			/*
			 * If we are in this callback and
			 * isoc_data_frame_count > 0, it means that the dti_urb
			 * submission was delayed in wa_dti_cb. Once
			 * we submit the last buf_in_urb, we can submit the
			 * delayed dti_urb.
			 */
			resubmit_dti = (isoc_data_frame_count ==
							urb_frame_count);
		} else if (active_buf_in_urbs == 0) {
			dev_dbg(dev,
				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
				xfer, wa_xfer_id(xfer), seg->index,
				seg->result);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
					WA_SEG_DONE);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		/*
		 * Error on data buf read. Only resubmit DTI if it hasn't
		 * already been done by previously hitting this error or by a
		 * successful completion of the previous buf_in_urb.
		 */
		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
		spin_lock_irqsave(&xfer->lock, flags);
		if (printk_ratelimit())
			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->result = urb->status;
		rpipe_ready = rpipe_avail_inc(rpipe);
		if (active_buf_in_urbs == 0)
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
				WA_SEG_ERROR);
		else
			__wa_xfer_abort(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}

	if (resubmit_dti) {
		int result;

		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;

		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
		if (result < 0) {
			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
				result);
			wa_reset_all(wa);
		}
	}
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not an xfer_result, we complain and
 * repost the DTI-URB. If it is an xfer_result, we do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
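/*
 * A rough sketch of those transitions as implemented below (a reading
 * of this code, not of the spec):
 *
 *	OFF --first notification--> RXR	(DTI-URB posted)
 *	RXR --xfer_result, IN data--> RBI	(BUF-IN-URB posted)
 *	RXR --xfer_result, no IN data--> RXR	(DTI-URB reposted)
 *	RBI --buf-in complete--> RXR		(DTI-URB reposted)
 *	any --ENOENT/ESHUTDOWN/too many errors--> OFF
 */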
static void wa_dti_cb(struct urb *urb)
{
	int result, dti_busy = 0;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	u32 xfer_id;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
			struct wa_xfer_result *xfer_result;
			struct wa_xfer *xfer;

			/* We have an xfer result buffer; check it */
			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
				urb->actual_length, urb->transfer_buffer);
			if (urb->actual_length != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
					urb->actual_length,
					sizeof(*xfer_result));
				break;
			}
			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
					xfer_result->hdr.bLength);
				break;
			}
			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
					xfer_result->hdr.bNotifyType);
				break;
			}
			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
			usb_status = xfer_result->bTransferStatus & 0x3f;
			if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
				/* taken care of already */
				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
					__func__, xfer_id,
					xfer_result->bTransferSegment & 0x7f);
				break;
			}
			xfer = wa_xfer_get_by_id(wa, xfer_id);
			if (xfer == NULL) {
				/* FIXME: transaction not found. */
				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
					xfer_id, usb_status);
				break;
			}
			wa_xfer_result_chew(wa, xfer, xfer_result);
			wa_xfer_put(xfer);
		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
			dti_busy = wa_process_iso_packet_status(wa, urb);
		} else {
			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
				wa->dti_state);
		}
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}

	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */
	if (!dti_busy) {
		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
		if (result < 0) {
			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
				result);
			wa_reset_all(wa);
		}
	}
out:
	return;
}

/*
 * Initialize the DTI URB for reading transfer result notifications and also
 * the buffer-in URBs, for reading buffers. Then we just submit the DTI URB.
 */
int wa_dti_start(struct wahc *wa)
{
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
	struct device *dev = &wa->usb_iface->dev;
	int result = -ENOMEM, index;

	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL)
		goto error_dti_urb_alloc;
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
		wa->dti_buf, wa->dti_buf_size,
		wa_dti_cb, wa);

	/* init the buf in URBs */
	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
		usb_fill_bulk_urb(
			&(wa->buf_in_urbs[index]), wa->usb_dev,
			usb_rcvbulkpipe(wa->usb_dev,
				0x80 | dti_epd->bEndpointAddress),
			NULL, 0, wa_buf_in_cb, wa);
	}
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
			result);
		goto error_dti_urb_submit;
	}
out:
	return 0;

error_dti_urb_submit:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_dti_start);

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed up things, we always have a URB reading the DTI endpoint;
 * we don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_dti_cb(), as that's where the whole state
 * machine starts.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}

	/* attempt to start the DTI ep processing. */
	if (wa_dti_start(wa) < 0)
		goto error;

	return;

error:
	wa_reset_all(wa);
}