   1/*
   2 * WUSB Wire Adapter
   3 * Data transfer and URB enqueing
   4 *
   5 * Copyright (C) 2005-2006 Intel Corporation
   6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License version
  10 * 2 as published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  20 * 02110-1301, USA.
  21 *
  22 *
  23 * How transfers work: get a buffer, break it up in segments (segment
  24 * size is a multiple of the maxpacket size). For each segment issue a
  25 * segment request (struct wa_xfer_*), then send the data buffer if
  26 * out or nothing if in (all over the DTO endpoint).
  27 *
  28 * For each submitted segment request, a notification will come over
  29 * the NEP endpoint and a transfer result (struct xfer_result) will
  30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
  31 * data coming (inbound transfer), schedule a read and handle it.
  32 *
  33 * Sounds simple, it is a pain to implement.
  34 *
  35 *
  36 * ENTRY POINTS
  37 *
  38 *   FIXME
  39 *
  40 * LIFE CYCLE / STATE DIAGRAM
  41 *
  42 *   FIXME
  43 *
  44 * THIS CODE IS DISGUSTING
  45 *
  46 *   Warned you are; it's my second try and still not happy with it.
  47 *
  48 * NOTES:
  49 *
  50 *   - No iso
  51 *
  52 *   - Supports DMA xfers, control, bulk and maybe interrupt
  53 *
  54 *   - Does not recycle unused rpipes
  55 *
  56 *     An rpipe is assigned to an endpoint the first time it is used,
  57 *     and then it's there, assigned, until the endpoint is disabled
  58 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
  59 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
  60 *     (should be a mutex).
  61 *
  62 *     Two ways it could be done:
  63 *
  64 *     (a) set up a timer every time an rpipe's use count drops to 1
  65 *         (which means unused) or when a transfer ends. Reset the
  66 *         timer when a xfer is queued. If the timer expires, release
  67 *         the rpipe [see rpipe_ep_disable()].
  68 *
  69 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
  70 *         when none are found go over the list, check their endpoint
  71 *         and their activity record (if no last-xfer-done-ts in the
  72 *         last x seconds) take it
  73 *
  74 *     However, due to the fact that we have a set of limited
  75 *     resources (max-segments-at-the-same-time per xfer,
  76 *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
  77 *     we are going to have to rebuild all this based on a scheduler,
  78 *     to where we have a list of transactions to do and based on the
  79 *     availability of the different required components (blocks,
  80 *     rpipes, segment slots, etc), we go scheduling them. Painful.
  81 */
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/slab.h>
  85#include <linux/hash.h>
  86#include <linux/ratelimit.h>
  87
  88#include "wa-hc.h"
  89#include "wusbhc.h"
  90
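/*
 * Illustrative sketch only, not part of the driver: the segment count
 * for the scheme described in the header comment above, using a
 * hypothetical helper name. It assumes seg_size has already been
 * rounded down to a multiple of the endpoint's maxpacket size, the way
 * __wa_xfer_setup_sizes() does below.
 */
static inline unsigned example_nr_segments(size_t buf_len, size_t seg_size)
{
	/* ceil(buf_len / seg_size), as __wa_xfer_setup_sizes() computes it */
	return (buf_len + seg_size - 1) / seg_size;
}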
  91enum {
  92	WA_SEGS_MAX = 255,
  93};
  94
  95enum wa_seg_status {
  96	WA_SEG_NOTREADY,
  97	WA_SEG_READY,
  98	WA_SEG_DELAYED,
  99	WA_SEG_SUBMITTED,
 100	WA_SEG_PENDING,
 101	WA_SEG_DTI_PENDING,
 102	WA_SEG_DONE,
 103	WA_SEG_ERROR,
 104	WA_SEG_ABORTED,
 105};
 106
 107static void wa_xfer_delayed_run(struct wa_rpipe *);
 108
 109/*
 110 * Life cycle governed by 'struct urb' (the refcount of the struct is
 111 * that of the 'struct urb' and usb_free_urb() would free the whole
 112 * struct).
 113 */
 114struct wa_seg {
 115	struct urb urb;
 116	struct urb *dto_urb;		/* for data output? */
 117	struct list_head list_node;	/* for rpipe->req_list */
 118	struct wa_xfer *xfer;		/* out xfer */
 119	u8 index;			/* which segment we are */
 120	enum wa_seg_status status;
 121	ssize_t result;			/* bytes xfered or error */
 122	struct wa_xfer_hdr xfer_hdr;
 123	u8 xfer_extra[];		/* extra space for xfer_hdr_ctl */
 124};
 125
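/*
 * Illustrative sketch only, not part of the driver: because the URB is
 * the first member of struct wa_seg, dropping the last reference on
 * seg->urb kfree()s the whole segment; wa_xfer_destroy() below relies
 * on this and puts the URBs instead of freeing the segments directly.
 */
static inline void example_seg_release(struct wa_seg *seg)
{
	usb_put_urb(&seg->urb);		/* frees the containing wa_seg */
}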
 126static void wa_seg_init(struct wa_seg *seg)
 127{
 128	/* usb_init_urb() repeats a lot of work, so we do it here */
 129	kref_init(&seg->urb.kref);
 130}
 131
 132/*
 133 * Protected by xfer->lock
 134 *
 135 */
 136struct wa_xfer {
 137	struct kref refcnt;
 138	struct list_head list_node;
 139	spinlock_t lock;
 140	u32 id;
 141
 142	struct wahc *wa;		/* Wire adapter we are plugged to */
 143	struct usb_host_endpoint *ep;
 144	struct urb *urb;		/* URB we are transferring for */
 145	struct wa_seg **seg;		/* transfer segments */
 146	u8 segs, segs_submitted, segs_done;
 147	unsigned is_inbound:1;
 148	unsigned is_dma:1;
 149	size_t seg_size;
 150	int result;
 151
 152	gfp_t gfp;			/* allocation mask */
 153
 154	struct wusb_dev *wusb_dev;	/* for activity timestamps */
 155};
 156
 157static inline void wa_xfer_init(struct wa_xfer *xfer)
 158{
 159	kref_init(&xfer->refcnt);
 160	INIT_LIST_HEAD(&xfer->list_node);
 161	spin_lock_init(&xfer->lock);
 162}
 163
 164/*
 165 * Destroy a transfer structure
 166 *
 167 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 168 * so we need to put them, not free them.
 169 */
 170static void wa_xfer_destroy(struct kref *_xfer)
 171{
 172	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
 173	if (xfer->seg) {
 174		unsigned cnt;
 175		for (cnt = 0; cnt < xfer->segs; cnt++) {
 176			if (xfer->is_inbound)
 177				usb_put_urb(xfer->seg[cnt]->dto_urb);
 178			usb_put_urb(&xfer->seg[cnt]->urb);
 179		}
 180	}
 181	kfree(xfer);
 182}
 183
 184static void wa_xfer_get(struct wa_xfer *xfer)
 185{
 186	kref_get(&xfer->refcnt);
 187}
 188
 189static void wa_xfer_put(struct wa_xfer *xfer)
 190{
 191	kref_put(&xfer->refcnt, wa_xfer_destroy);
 192}
 193
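/*
 * Illustrative sketch only, not part of the driver: the reference
 * discipline for transfers. Lookups such as wa_xfer_get_by_id() below
 * return a referenced xfer; the caller drops that reference with
 * wa_xfer_put() when done, which may end up in wa_xfer_destroy().
 */
static inline void example_xfer_ref_cycle(struct wa_xfer *xfer)
{
	wa_xfer_get(xfer);		/* temporary reference */
	/* ... inspect the xfer, normally under xfer->lock ... */
	wa_xfer_put(xfer);		/* last put frees it */
}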
 194/*
 195 * xfer is referenced
 196 *
 197 * xfer->lock has to be unlocked
 198 *
 199 * We take xfer->lock for setting the result; this is a barrier
 200 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 201 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 202 * reference to the transfer.
 203 */
 204static void wa_xfer_giveback(struct wa_xfer *xfer)
 205{
 206	unsigned long flags;
 207
 208	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
 209	list_del_init(&xfer->list_node);
 210	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
 211	/* FIXME: segmentation broken -- kills DWA */
 212	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
 213	wa_put(xfer->wa);
 214	wa_xfer_put(xfer);
 215}
 216
 217/*
 218 * xfer is referenced
 219 *
 220 * xfer->lock has to be unlocked
 221 */
 222static void wa_xfer_completion(struct wa_xfer *xfer)
 223{
 224	if (xfer->wusb_dev)
 225		wusb_dev_put(xfer->wusb_dev);
 226	rpipe_put(xfer->ep->hcpriv);
 227	wa_xfer_giveback(xfer);
 228}
 229
 230/*
 231 * If transfer is done, wrap it up and return true
 232 *
 233 * xfer->lock has to be locked
 234 */
 235static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
 236{
 237	struct device *dev = &xfer->wa->usb_iface->dev;
 238	unsigned result, cnt;
 239	struct wa_seg *seg;
 240	struct urb *urb = xfer->urb;
 241	unsigned found_short = 0;
 242
 243	result = xfer->segs_done == xfer->segs_submitted;
 244	if (result == 0)
 245		goto out;
 246	urb->actual_length = 0;
 247	for (cnt = 0; cnt < xfer->segs; cnt++) {
 248		seg = xfer->seg[cnt];
 249		switch (seg->status) {
 250		case WA_SEG_DONE:
 251			if (found_short && seg->result > 0) {
 252				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
 253					xfer, cnt, seg->result);
 254				urb->status = -EINVAL;
 255				goto out;
 256			}
 257			urb->actual_length += seg->result;
 258			if (seg->result < xfer->seg_size
 259			    && cnt != xfer->segs-1)
 260				found_short = 1;
 261			dev_dbg(dev, "xfer %p#%u: DONE short %d "
 262				"result %zu urb->actual_length %d\n",
 263				xfer, seg->index, found_short, seg->result,
 264				urb->actual_length);
 265			break;
 266		case WA_SEG_ERROR:
 267			xfer->result = seg->result;
 268			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
 269				xfer, seg->index, seg->result);
 270			goto out;
 271		case WA_SEG_ABORTED:
 272			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
 273				xfer, seg->index, urb->status);
 274			xfer->result = urb->status;
 275			goto out;
 276		default:
 277			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
 278				 xfer, cnt, seg->status);
 279			xfer->result = -EINVAL;
 280			goto out;
 281		}
 282	}
 283	xfer->result = 0;
 284out:
 285	return result;
 286}
 287
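/*
 * Illustrative sketch only: the pattern the completion paths below
 * follow around __wa_xfer_is_done() -- evaluate it under xfer->lock,
 * then run the completion outside the lock.
 */
#if 0	/* usage sketch, not compiled */
	spin_lock_irqsave(&xfer->lock, flags);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
#endif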
 288/*
 289 * Initialize a transfer's ID
 290 *
 291 * We need to use a sequential number; if we use the pointer or the
 292 * hash of the pointer, it can repeat over sequential transfers and
 293 * then it will confuse the HWA....wonder why in hell they put a 32
 294 * bit handle in there then.
 295 */
 296static void wa_xfer_id_init(struct wa_xfer *xfer)
 297{
 298	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
 299}
 300
 301/*
 302 * Return the xfer's ID associated with xfer
 303 *
 304 * The ID is generated sequentially by wa_xfer_id_init().
 305 */
 306static u32 wa_xfer_id(struct wa_xfer *xfer)
 307{
 308	return xfer->id;
 309}
 310
 311/*
 312 * Search the wire adapter's transfer list for a transfer with this ID
 313 *
 314 * The list is walked under wa->xfer_list_lock and a reference is taken
 315 * on the transfer before it is returned.
 316 *
 317 * @returns NULL if not found.
 318 */
 319static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
 320{
 321	unsigned long flags;
 322	struct wa_xfer *xfer_itr;
 323	spin_lock_irqsave(&wa->xfer_list_lock, flags);
 324	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
 325		if (id == xfer_itr->id) {
 326			wa_xfer_get(xfer_itr);
 327			goto out;
 328		}
 329	}
 330	xfer_itr = NULL;
 331out:
 332	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 333	return xfer_itr;
 334}
 335
 336struct wa_xfer_abort_buffer {
 337	struct urb urb;
 338	struct wa_xfer_abort cmd;
 339};
 340
 341static void __wa_xfer_abort_cb(struct urb *urb)
 342{
 343	struct wa_xfer_abort_buffer *b = urb->context;
 344	usb_put_urb(&b->urb);
 345}
 346
 347/*
 348 * Aborts an ongoing transaction
 349 *
 350 * Assumes the transfer is referenced and locked and in a submitted
 351 * state (mainly that there is an endpoint/rpipe assigned).
 352 *
 353 * The callback (see above) does nothing but freeing up the data by
 354 * putting the URB. Because the URB is allocated at the head of the
 355 * struct, the whole space we allocated is kfreed.
 356 *
 357 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 358 * politely ignore because at this point the transaction has been
 359 * marked as aborted already.
 360 */
 361static void __wa_xfer_abort(struct wa_xfer *xfer)
 362{
 363	int result;
 364	struct device *dev = &xfer->wa->usb_iface->dev;
 365	struct wa_xfer_abort_buffer *b;
 366	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 367
 368	b = kmalloc(sizeof(*b), GFP_ATOMIC);
 369	if (b == NULL)
 370		goto error_kmalloc;
 371	b->cmd.bLength =  sizeof(b->cmd);
 372	b->cmd.bRequestType = WA_XFER_ABORT;
 373	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
 374	b->cmd.dwTransferID = wa_xfer_id(xfer);
 375
 376	usb_init_urb(&b->urb);
 377	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
 378		usb_sndbulkpipe(xfer->wa->usb_dev,
 379				xfer->wa->dto_epd->bEndpointAddress),
 380		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
 381	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
 382	if (result < 0)
 383		goto error_submit;
 384	return;				/* callback frees! */
 385
 386
 387error_submit:
 388	if (printk_ratelimit())
 389		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
 390			xfer, result);
 391	kfree(b);
 392error_kmalloc:
 393	return;
 394
 395}
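/*
 * Illustrative note, not part of the driver: the abort buffer above
 * relies on the same first-member trick as struct wa_seg --
 * __wa_xfer_abort_cb() only puts &b->urb and, because the URB sits at
 * the start of struct wa_xfer_abort_buffer, that releases the whole
 * kmalloc()ed buffer.
 */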
 396
 397/*
 398 *
 399 * @returns < 0 on error, transfer segment request size if ok
 400 */
 401static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 402				     enum wa_xfer_type *pxfer_type)
 403{
 404	ssize_t result;
 405	struct device *dev = &xfer->wa->usb_iface->dev;
 406	size_t maxpktsize;
 407	struct urb *urb = xfer->urb;
 408	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 409
 410	switch (rpipe->descr.bmAttribute & 0x3) {
 411	case USB_ENDPOINT_XFER_CONTROL:
 412		*pxfer_type = WA_XFER_TYPE_CTL;
 413		result = sizeof(struct wa_xfer_ctl);
 414		break;
 415	case USB_ENDPOINT_XFER_INT:
 416	case USB_ENDPOINT_XFER_BULK:
 417		*pxfer_type = WA_XFER_TYPE_BI;
 418		result = sizeof(struct wa_xfer_bi);
 419		break;
 420	case USB_ENDPOINT_XFER_ISOC:
 421		dev_err(dev, "FIXME: ISOC not implemented\n");
 422		result = -ENOSYS;
 423		goto error;
 424	default:
 425		/* never happens */
 426		BUG();
 427		result = -EINVAL;	/* shut gcc up */
 428	};
 429	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
 430	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
 431	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
 432		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
 433	/* Compute the segment size and make sure it is a multiple of
 434	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
 435	 * a check (FIXME) */
 436	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
 437	if (xfer->seg_size < maxpktsize) {
 438		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
 439			"%zu\n", xfer->seg_size, maxpktsize);
 440		result = -EINVAL;
 441		goto error;
 442	}
 443	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
 444	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
 445		/ xfer->seg_size;
 446	if (xfer->segs >= WA_SEGS_MAX) {
 447		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
 448			(int)(urb->transfer_buffer_length / xfer->seg_size),
 449			WA_SEGS_MAX);
 450		result = -EINVAL;
 451		goto error;
 452	}
 453	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
 454		xfer->segs = 1;
 455error:
 456	return result;
 457}
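/*
 * Worked example of the sizing above, with hypothetical descriptor
 * values: wBlocks = 16 and bRPipeBlockSize = 9 give a raw segment size
 * of 16 << (9 - 1) = 4096 bytes; with wMaxPacketSize = 512 that is
 * already a multiple of the packet size, so a 10000-byte URB is split
 * into three segments of 4096, 4096 and 1808 bytes.
 */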
 458
 459/* Fill in the common request header and xfer-type specific data. */
 460static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
 461				 struct wa_xfer_hdr *xfer_hdr0,
 462				 enum wa_xfer_type xfer_type,
 463				 size_t xfer_hdr_size)
 464{
 465	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 466
 467	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
 468	xfer_hdr0->bLength = xfer_hdr_size;
 469	xfer_hdr0->bRequestType = xfer_type;
 470	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
 471	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
 472	xfer_hdr0->bTransferSegment = 0;
 473	switch (xfer_type) {
 474	case WA_XFER_TYPE_CTL: {
 475		struct wa_xfer_ctl *xfer_ctl =
 476			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
 477		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
 478		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
 479		       sizeof(xfer_ctl->baSetupData));
 480		break;
 481	}
 482	case WA_XFER_TYPE_BI:
 483		break;
 484	case WA_XFER_TYPE_ISO:
 485		printk(KERN_ERR "FIXME: ISOC not implemented\n");
 486	default:
 487		BUG();
 488	};
 489}
 490
 491/*
 492 * Callback for the OUT data phase of the segment request
 493 *
 494 * Check wa_seg_cb(); most comments also apply here because this
 495 * function does almost the same thing and they work closely
 496 * together.
 497 *
 498 * If the seg request has failed but this DTO phase has succeeded,
 499 * wa_seg_cb() has already failed the segment and moved the
 500 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 501 * effectively do nothing.
 502 */
 503static void wa_seg_dto_cb(struct urb *urb)
 504{
 505	struct wa_seg *seg = urb->context;
 506	struct wa_xfer *xfer = seg->xfer;
 507	struct wahc *wa;
 508	struct device *dev;
 509	struct wa_rpipe *rpipe;
 510	unsigned long flags;
 511	unsigned rpipe_ready = 0;
 512	u8 done = 0;
 513
 514	switch (urb->status) {
 515	case 0:
 516		spin_lock_irqsave(&xfer->lock, flags);
 517		wa = xfer->wa;
 518		dev = &wa->usb_iface->dev;
 519		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
 520			xfer, seg->index, urb->actual_length);
 521		if (seg->status < WA_SEG_PENDING)
 522			seg->status = WA_SEG_PENDING;
 523		seg->result = urb->actual_length;
 524		spin_unlock_irqrestore(&xfer->lock, flags);
 525		break;
 526	case -ECONNRESET:	/* URB unlinked; no need to do anything */
 527	case -ENOENT:		/* as it was done by whoever unlinked us */
 528		break;
 529	default:		/* Other errors ... */
 530		spin_lock_irqsave(&xfer->lock, flags);
 531		wa = xfer->wa;
 532		dev = &wa->usb_iface->dev;
 533		rpipe = xfer->ep->hcpriv;
 534		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
 535			xfer, seg->index, urb->status);
 536		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 537			    EDC_ERROR_TIMEFRAME)){
 538			dev_err(dev, "DTO: URB max acceptable errors "
 539				"exceeded, resetting device\n");
 540			wa_reset_all(wa);
 541		}
 542		if (seg->status != WA_SEG_ERROR) {
 543			seg->status = WA_SEG_ERROR;
 544			seg->result = urb->status;
 545			xfer->segs_done++;
 546			__wa_xfer_abort(xfer);
 547			rpipe_ready = rpipe_avail_inc(rpipe);
 548			done = __wa_xfer_is_done(xfer);
 549		}
 550		spin_unlock_irqrestore(&xfer->lock, flags);
 551		if (done)
 552			wa_xfer_completion(xfer);
 553		if (rpipe_ready)
 554			wa_xfer_delayed_run(rpipe);
 555	}
 556}
 557
 558/*
 559 * Callback for the segment request
 560 *
 561 * If successful, transition state (unless already transitioned or
 562 * outbound transfer); otherwise, take a note of the error, mark this
 563 * segment done and try completion.
 564 *
 565 * Note we don't access until we are sure that the transfer hasn't
 566 * been cancelled (ECONNRESET, ENOENT), which could mean that
 567 * seg->xfer could be already gone.
 568 *
 569 * We have to check before setting the status to WA_SEG_PENDING
 570 * because sometimes the xfer result callback arrives before this
 571 * callback (geeeeeeze), so it might happen that we are already in
 572 * another state. As well, we don't set it if the transfer is inbound,
 573 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 574 * finishes.
 575 */
 576static void wa_seg_cb(struct urb *urb)
 577{
 578	struct wa_seg *seg = urb->context;
 579	struct wa_xfer *xfer = seg->xfer;
 580	struct wahc *wa;
 581	struct device *dev;
 582	struct wa_rpipe *rpipe;
 583	unsigned long flags;
 584	unsigned rpipe_ready;
 585	u8 done = 0;
 586
 587	switch (urb->status) {
 588	case 0:
 589		spin_lock_irqsave(&xfer->lock, flags);
 590		wa = xfer->wa;
 591		dev = &wa->usb_iface->dev;
 592		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
 593		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
 594			seg->status = WA_SEG_PENDING;
 595		spin_unlock_irqrestore(&xfer->lock, flags);
 596		break;
 597	case -ECONNRESET:	/* URB unlinked; no need to do anything */
 598	case -ENOENT:		/* as it was done by whoever unlinked us */
 599		break;
 600	default:		/* Other errors ... */
 601		spin_lock_irqsave(&xfer->lock, flags);
 602		wa = xfer->wa;
 603		dev = &wa->usb_iface->dev;
 604		rpipe = xfer->ep->hcpriv;
 605		if (printk_ratelimit())
 606			dev_err(dev, "xfer %p#%u: request error %d\n",
 607				xfer, seg->index, urb->status);
 608		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 609			    EDC_ERROR_TIMEFRAME)){
 610			dev_err(dev, "DTO: URB max acceptable errors "
 611				"exceeded, resetting device\n");
 612			wa_reset_all(wa);
 613		}
 614		usb_unlink_urb(seg->dto_urb);
 615		seg->status = WA_SEG_ERROR;
 616		seg->result = urb->status;
 617		xfer->segs_done++;
 618		__wa_xfer_abort(xfer);
 619		rpipe_ready = rpipe_avail_inc(rpipe);
 620		done = __wa_xfer_is_done(xfer);
 621		spin_unlock_irqrestore(&xfer->lock, flags);
 622		if (done)
 623			wa_xfer_completion(xfer);
 624		if (rpipe_ready)
 625			wa_xfer_delayed_run(rpipe);
 626	}
 627}
 628
 629/*
 630 * Allocate the segs array and initialize each of them
 631 *
 632 * The segments are freed by wa_xfer_destroy() when the xfer use count
 633 * drops to zero; however, because each segment is given the same life
 634 * cycle as the USB URB it contains, it is actually freed by
 635 * usb_put_urb() on the contained USB URB (twisted, eh?).
 636 */
 637static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 638{
 639	int result, cnt;
 640	size_t alloc_size = sizeof(*xfer->seg[0])
 641		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
 642	struct usb_device *usb_dev = xfer->wa->usb_dev;
 643	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
 644	struct wa_seg *seg;
 645	size_t buf_itr, buf_size, buf_itr_size;
 646
 647	result = -ENOMEM;
 648	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
 649	if (xfer->seg == NULL)
 650		goto error_segs_kzalloc;
 651	buf_itr = 0;
 652	buf_size = xfer->urb->transfer_buffer_length;
 653	for (cnt = 0; cnt < xfer->segs; cnt++) {
 654		seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
 655		if (seg == NULL)
 656			goto error_seg_kzalloc;
 657		wa_seg_init(seg);
 658		seg->xfer = xfer;
 659		seg->index = cnt;
 660		usb_fill_bulk_urb(&seg->urb, usb_dev,
 661				  usb_sndbulkpipe(usb_dev,
 662						  dto_epd->bEndpointAddress),
 663				  &seg->xfer_hdr, xfer_hdr_size,
 664				  wa_seg_cb, seg);
 665		buf_itr_size = buf_size > xfer->seg_size ?
 666			xfer->seg_size : buf_size;
 667		if (xfer->is_inbound == 0 && buf_size > 0) {
 668			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
 669			if (seg->dto_urb == NULL)
 670				goto error_dto_alloc;
 671			usb_fill_bulk_urb(
 672				seg->dto_urb, usb_dev,
 673				usb_sndbulkpipe(usb_dev,
 674						dto_epd->bEndpointAddress),
 675				NULL, 0, wa_seg_dto_cb, seg);
 676			if (xfer->is_dma) {
 677				seg->dto_urb->transfer_dma =
 678					xfer->urb->transfer_dma + buf_itr;
 679				seg->dto_urb->transfer_flags |=
 680					URB_NO_TRANSFER_DMA_MAP;
 681			} else
 682				seg->dto_urb->transfer_buffer =
 683					xfer->urb->transfer_buffer + buf_itr;
 684			seg->dto_urb->transfer_buffer_length = buf_itr_size;
 685		}
 686		seg->status = WA_SEG_READY;
 687		buf_itr += buf_itr_size;
 688		buf_size -= buf_itr_size;
 689	}
 690	return 0;
 691
 692error_dto_alloc:
 693	kfree(xfer->seg[cnt]);
 694	cnt--;
 695error_seg_kzalloc:
 696	/* use the fact that cnt is left at where it failed */
 697	for (; cnt > 0; cnt--) {
 698		if (xfer->is_inbound == 0)
 699			kfree(xfer->seg[cnt]->dto_urb);
 700		kfree(xfer->seg[cnt]);
 701	}
 702error_segs_kzalloc:
 703	return result;
 704}
 705
 706/*
 707 * Allocates all the stuff needed to submit a transfer
 708 *
 709 * Breaks the whole data buffer in a list of segments, each one has a
 710 * structure allocated to it and linked in xfer->seg[index]
 711 *
 712 * FIXME: merge setup_segs() and the last part of this function, no
 713 *        need to do two for loops when we could run everything in a
 714 *        single one
 715 */
 716static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
 717{
 718	int result;
 719	struct device *dev = &xfer->wa->usb_iface->dev;
 720	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
 721	size_t xfer_hdr_size, cnt, transfer_size;
 722	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
 723
 724	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
 725	if (result < 0)
 726		goto error_setup_sizes;
 727	xfer_hdr_size = result;
 728	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
 729	if (result < 0) {
 730		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
 731			xfer, xfer->segs, result);
 732		goto error_setup_segs;
 733	}
 734	/* Fill the first header */
 735	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
 736	wa_xfer_id_init(xfer);
 737	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
 738
 739	/* Fill remaining headers */
 740	xfer_hdr = xfer_hdr0;
 741	transfer_size = urb->transfer_buffer_length;
 742	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
 743		xfer->seg_size : transfer_size;
 744	transfer_size -=  xfer->seg_size;
 745	for (cnt = 1; cnt < xfer->segs; cnt++) {
 746		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
 747		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
 748		xfer_hdr->bTransferSegment = cnt;
 749		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
 750			cpu_to_le32(xfer->seg_size)
 751			: cpu_to_le32(transfer_size);
 752		xfer->seg[cnt]->status = WA_SEG_READY;
 753		transfer_size -=  xfer->seg_size;
 754	}
 755	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
 756	result = 0;
 757error_setup_segs:
 758error_setup_sizes:
 759	return result;
 760}
 761
 762/*
 763 *
 764 *
 765 * rpipe->seg_lock is held!
 766 */
 767static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
 768			   struct wa_seg *seg)
 769{
 770	int result;
 771	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
 772	if (result < 0) {
 773		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
 774		       xfer, seg->index, result);
 775		goto error_seg_submit;
 776	}
 777	if (seg->dto_urb) {
 778		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
 779		if (result < 0) {
 780			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
 781			       xfer, seg->index, result);
 782			goto error_dto_submit;
 783		}
 784	}
 785	seg->status = WA_SEG_SUBMITTED;
 786	rpipe_avail_dec(rpipe);
 787	return 0;
 788
 789error_dto_submit:
 790	usb_unlink_urb(&seg->urb);
 791error_seg_submit:
 792	seg->status = WA_SEG_ERROR;
 793	seg->result = result;
 794	return result;
 795}
 796
 797/*
 798 * Execute more queued request segments until the maximum concurrent allowed.
 799 *
 800 * The ugly unlock/lock sequence on the error path is needed as the
 801 * xfer->lock normally nests the seg_lock and not vice versa.
 802 *
 803 */
 804static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
 805{
 806	int result;
 807	struct device *dev = &rpipe->wa->usb_iface->dev;
 808	struct wa_seg *seg;
 809	struct wa_xfer *xfer;
 810	unsigned long flags;
 811
 812	spin_lock_irqsave(&rpipe->seg_lock, flags);
 813	while (atomic_read(&rpipe->segs_available) > 0
 814	      && !list_empty(&rpipe->seg_list)) {
 815		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
 816				 list_node);
 817		list_del(&seg->list_node);
 818		xfer = seg->xfer;
 819		result = __wa_seg_submit(rpipe, xfer, seg);
 820		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
 821			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
 822		if (unlikely(result < 0)) {
 823			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 824			spin_lock_irqsave(&xfer->lock, flags);
 825			__wa_xfer_abort(xfer);
 826			xfer->segs_done++;
 827			spin_unlock_irqrestore(&xfer->lock, flags);
 828			spin_lock_irqsave(&rpipe->seg_lock, flags);
 829		}
 830	}
 831	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 832}
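/*
 * Illustrative sketch only: the usual lock nesting in this file is
 * xfer->lock outside and rpipe->seg_lock inside (see __wa_xfer_submit()
 * below), which is why the error path above has to drop seg_lock
 * before taking xfer->lock.
 */
#if 0	/* nesting sketch, not compiled */
	spin_lock_irqsave(&xfer->lock, flags);
	spin_lock(&rpipe->seg_lock);
	/* ... queue or submit segments ... */
	spin_unlock(&rpipe->seg_lock);
	spin_unlock_irqrestore(&xfer->lock, flags);
#endif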
 833
 834/*
 835 *
 836 * xfer->lock is taken
 837 *
 838 * On failure submitting we just stop submitting and return error;
 839 * wa_urb_enqueue_b() will execute the completion path
 840 */
 841static int __wa_xfer_submit(struct wa_xfer *xfer)
 842{
 843	int result;
 844	struct wahc *wa = xfer->wa;
 845	struct device *dev = &wa->usb_iface->dev;
 846	unsigned cnt;
 847	struct wa_seg *seg;
 848	unsigned long flags;
 849	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 850	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
 851	u8 available;
 852	u8 empty;
 853
 854	spin_lock_irqsave(&wa->xfer_list_lock, flags);
 855	list_add_tail(&xfer->list_node, &wa->xfer_list);
 856	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 857
 858	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
 859	result = 0;
 860	spin_lock_irqsave(&rpipe->seg_lock, flags);
 861	for (cnt = 0; cnt < xfer->segs; cnt++) {
 862		available = atomic_read(&rpipe->segs_available);
 863		empty = list_empty(&rpipe->seg_list);
 864		seg = xfer->seg[cnt];
 865		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
 866			xfer, cnt, available, empty,
 867			available == 0 || !empty ? "delayed" : "submitted");
 868		if (available == 0 || !empty) {
 869			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
 870			seg->status = WA_SEG_DELAYED;
 871			list_add_tail(&seg->list_node, &rpipe->seg_list);
 872		} else {
 873			result = __wa_seg_submit(rpipe, xfer, seg);
 874			if (result < 0) {
 875				__wa_xfer_abort(xfer);
 876				goto error_seg_submit;
 877			}
 878		}
 879		xfer->segs_submitted++;
 880	}
 881error_seg_submit:
 882	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
 883	return result;
 884}
 885
 886/*
 887 * Second part of a URB/transfer enqueuement
 888 *
 889 * Assumes this comes from wa_urb_enqueue() [maybe through
 890 * wa_urb_enqueue_run()]. At this point:
 891 *
 892 * xfer->wa	filled and refcounted
 893 * xfer->ep	filled with rpipe refcounted if
 894 *              delayed == 0
 895 * xfer->urb 	filled and refcounted (this is the case when called
 896 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 897 *              and when called by wa_urb_enqueue_run(), as we took an
 898 *              extra ref dropped by _run() after we return).
 899 * xfer->gfp	filled
 900 *
 901 * If we fail at __wa_xfer_submit(), then we just check if we are done
 902 * and if so, we run the completion procedure. However, if we are not
 903 * yet done, we do nothing and wait for the completion handlers from
 904 * the submitted URBs or from the xfer-result path to kick in. If xfer
 905 * result never kicks in, the xfer will timeout from the USB code and
 906 * dequeue() will be called.
 907 */
 908static void wa_urb_enqueue_b(struct wa_xfer *xfer)
 909{
 910	int result;
 911	unsigned long flags;
 912	struct urb *urb = xfer->urb;
 913	struct wahc *wa = xfer->wa;
 914	struct wusbhc *wusbhc = wa->wusb;
 915	struct wusb_dev *wusb_dev;
 916	unsigned done;
 917
 918	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
 919	if (result < 0)
 920		goto error_rpipe_get;
 921	result = -ENODEV;
 922	/* FIXME: segmentation broken -- kills DWA */
 923	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
 924	if (urb->dev == NULL) {
 925		mutex_unlock(&wusbhc->mutex);
 926		goto error_dev_gone;
 927	}
 928	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
 929	if (wusb_dev == NULL) {
 930		mutex_unlock(&wusbhc->mutex);
 931		goto error_dev_gone;
 932	}
 933	mutex_unlock(&wusbhc->mutex);
 934
 935	spin_lock_irqsave(&xfer->lock, flags);
 936	xfer->wusb_dev = wusb_dev;
 937	result = urb->status;
 938	if (urb->status != -EINPROGRESS)
 939		goto error_dequeued;
 940
 941	result = __wa_xfer_setup(xfer, urb);
 942	if (result < 0)
 943		goto error_xfer_setup;
 944	result = __wa_xfer_submit(xfer);
 945	if (result < 0)
 946		goto error_xfer_submit;
 947	spin_unlock_irqrestore(&xfer->lock, flags);
 948	return;
 949
 950	/* this is basically wa_xfer_completion() broken up;
 951	 * wa_xfer_giveback() does a wa_xfer_put() that will call
 952	 * wa_xfer_destroy() and clean up, undoing the setup().
 953	 */
 954error_xfer_setup:
 955error_dequeued:
 956	spin_unlock_irqrestore(&xfer->lock, flags);
 957	/* FIXME: segmentation broken, kills DWA */
 958	if (wusb_dev)
 959		wusb_dev_put(wusb_dev);
 960error_dev_gone:
 961	rpipe_put(xfer->ep->hcpriv);
 962error_rpipe_get:
 963	xfer->result = result;
 964	wa_xfer_giveback(xfer);
 965	return;
 966
 967error_xfer_submit:
 968	done = __wa_xfer_is_done(xfer);
 969	xfer->result = result;
 970	spin_unlock_irqrestore(&xfer->lock, flags);
 971	if (done)
 972		wa_xfer_completion(xfer);
 973}
 974
 975/*
 976 * Execute the delayed transfers in the Wire Adapter @wa
 977 *
 978 * We need to be careful here, as dequeue() could be called in the
 979 * middle.  That's why we do the whole thing under the
 980 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 981 * and then checks the list -- so as we would be acquiring in inverse
 982 * order, we just drop the lock once we have the xfer and reacquire it
 983 * later.
 984 */
 985void wa_urb_enqueue_run(struct work_struct *ws)
 986{
 987	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
 988	struct wa_xfer *xfer, *next;
 989	struct urb *urb;
 990
 991	spin_lock_irq(&wa->xfer_list_lock);
 992	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
 993				 list_node) {
 994		list_del_init(&xfer->list_node);
 995		spin_unlock_irq(&wa->xfer_list_lock);
 996
 997		urb = xfer->urb;
 998		wa_urb_enqueue_b(xfer);
 999		usb_put_urb(urb);	/* taken when queuing */
1000
1001		spin_lock_irq(&wa->xfer_list_lock);
1002	}
1003	spin_unlock_irq(&wa->xfer_list_lock);
1004}
1005EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1006
1007/*
1008 * Submit a transfer to the Wire Adapter in a delayed way
1009 *
1010 * The process of enqueuing involves possible sleeps() [see
1011 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1012 * in an atomic section, we defer the enqueue_b() call; otherwise we call it directly.
1013 *
1014 * @urb: We own a reference to it done by the HCI Linux USB stack that
1015 *       will be given up by calling usb_hcd_giveback_urb() or by
1016 *       returning error from this function -> ergo we don't have to
1017 *       refcount it.
1018 */
1019int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1020		   struct urb *urb, gfp_t gfp)
1021{
1022	int result;
1023	struct device *dev = &wa->usb_iface->dev;
1024	struct wa_xfer *xfer;
1025	unsigned long my_flags;
1026	unsigned cant_sleep = irqs_disabled() | in_atomic();
1027
1028	if (urb->transfer_buffer == NULL
1029	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1030	    && urb->transfer_buffer_length != 0) {
1031		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1032		dump_stack();
1033	}
1034
1035	result = -ENOMEM;
1036	xfer = kzalloc(sizeof(*xfer), gfp);
1037	if (xfer == NULL)
1038		goto error_kmalloc;
1039
1040	result = -ENOENT;
1041	if (urb->status != -EINPROGRESS)	/* cancelled */
1042		goto error_dequeued;		/* before starting? */
1043	wa_xfer_init(xfer);
1044	xfer->wa = wa_get(wa);
1045	xfer->urb = urb;
1046	xfer->gfp = gfp;
1047	xfer->ep = ep;
1048	urb->hcpriv = xfer;
1049
1050	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1051		xfer, urb, urb->pipe, urb->transfer_buffer_length,
1052		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1053		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1054		cant_sleep ? "deferred" : "inline");
1055
1056	if (cant_sleep) {
1057		usb_get_urb(urb);
1058		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1059		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1060		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1061		queue_work(wusbd, &wa->xfer_work);
1062	} else {
1063		wa_urb_enqueue_b(xfer);
1064	}
1065	return 0;
1066
1067error_dequeued:
1068	kfree(xfer);
1069error_kmalloc:
1070	return result;
1071}
1072EXPORT_SYMBOL_GPL(wa_urb_enqueue);
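/*
 * Illustrative sketch only (hypothetical caller, not the real host
 * controller glue): how an HCD urb_enqueue operation would hand a URB
 * to this layer.
 */
#if 0	/* usage sketch, not compiled */
	result = wa_urb_enqueue(wa, urb->ep, urb, mem_flags);
#endif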
1073
1074/*
1075 * Dequeue a URB and make sure the completion handler is called
1076 * [via wa_xfer_giveback()/wusbhc_giveback_urb()].
1077 *
1078 * Until a transfer goes successfully through wa_urb_enqueue() it
1079 * needs to be dequeued with completion calling; when stuck in delayed
1080 * or before wa_xfer_setup() is called, we need to do completion.
1081 *
1082 *  not setup  If there is no hcpriv yet, that means that the enqueue
1083 *             still had no time to set the xfer up. Because
1084 *             urb->status should be other than -EINPROGRESS,
1085 *             enqueue() will catch that and bail out.
1086 *
1087 * If the transfer has gone through setup, we just need to clean it
1088 * up. If it has gone through submit(), we have to abort it [with an
1089 * asynch request] and then make sure we cancel each segment.
1090 *
1091 */
1092int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1093{
1094	unsigned long flags, flags2;
1095	struct wa_xfer *xfer;
1096	struct wa_seg *seg;
1097	struct wa_rpipe *rpipe;
1098	unsigned cnt;
1099	unsigned rpipe_ready = 0;
1100
1101	xfer = urb->hcpriv;
1102	if (xfer == NULL) {
1103		/* Nothing setup yet; enqueue will see urb->status !=
1104		 * -EINPROGRESS (set by the hcd layer) and bail out with
1105		 * an error, no need to do completion
1106		 */
1107		BUG_ON(urb->status == -EINPROGRESS);
1108		goto out;
1109	}
1110	spin_lock_irqsave(&xfer->lock, flags);
1111	rpipe = xfer->ep->hcpriv;
1112	/* Check the delayed list -> if there, release and complete */
1113	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1114	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1115		goto dequeue_delayed;
1116	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1117	if (xfer->seg == NULL)  	/* still hasn't reached */
1118		goto out_unlock;	/* setup(), enqueue_b() completes */
1119	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
1120	__wa_xfer_abort(xfer);
1121	for (cnt = 0; cnt < xfer->segs; cnt++) {
1122		seg = xfer->seg[cnt];
1123		switch (seg->status) {
1124		case WA_SEG_NOTREADY:
1125		case WA_SEG_READY:
1126			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1127			       xfer, cnt, seg->status);
1128			WARN_ON(1);
1129			break;
1130		case WA_SEG_DELAYED:
1131			seg->status = WA_SEG_ABORTED;
1132			spin_lock_irqsave(&rpipe->seg_lock, flags2);
1133			list_del(&seg->list_node);
1134			xfer->segs_done++;
1135			rpipe_ready = rpipe_avail_inc(rpipe);
1136			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1137			break;
1138		case WA_SEG_SUBMITTED:
1139			seg->status = WA_SEG_ABORTED;
1140			usb_unlink_urb(&seg->urb);
1141			if (xfer->is_inbound == 0)
1142				usb_unlink_urb(seg->dto_urb);
1143			xfer->segs_done++;
1144			rpipe_ready = rpipe_avail_inc(rpipe);
1145			break;
1146		case WA_SEG_PENDING:
1147			seg->status = WA_SEG_ABORTED;
1148			xfer->segs_done++;
1149			rpipe_ready = rpipe_avail_inc(rpipe);
1150			break;
1151		case WA_SEG_DTI_PENDING:
1152			usb_unlink_urb(wa->dti_urb);
1153			seg->status = WA_SEG_ABORTED;
1154			xfer->segs_done++;
1155			rpipe_ready = rpipe_avail_inc(rpipe);
1156			break;
1157		case WA_SEG_DONE:
1158		case WA_SEG_ERROR:
1159		case WA_SEG_ABORTED:
1160			break;
1161		}
1162	}
1163	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
1164	__wa_xfer_is_done(xfer);
1165	spin_unlock_irqrestore(&xfer->lock, flags);
1166	wa_xfer_completion(xfer);
1167	if (rpipe_ready)
1168		wa_xfer_delayed_run(rpipe);
1169	return 0;
1170
1171out_unlock:
1172	spin_unlock_irqrestore(&xfer->lock, flags);
1173out:
1174	return 0;
1175
1176dequeue_delayed:
1177	list_del_init(&xfer->list_node);
1178	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1179	xfer->result = urb->status;
1180	spin_unlock_irqrestore(&xfer->lock, flags);
1181	wa_xfer_giveback(xfer);
1182	usb_put_urb(urb);		/* we got a ref in enqueue() */
1183	return 0;
1184}
1185EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1186
1187/*
1188 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1189 * codes
1190 *
1191 * Positive errno values are internal inconsistencies and should be
1192 * flagged louder. Negative are to be passed up to the user in the
1193 * normal way.
1194 *
1195 * @status: USB WA status code -- high two bits are stripped.
1196 */
1197static int wa_xfer_status_to_errno(u8 status)
1198{
1199	int errno;
1200	u8 real_status = status;
1201	static int xlat[] = {
1202		[WA_XFER_STATUS_SUCCESS] = 		0,
1203		[WA_XFER_STATUS_HALTED] = 		-EPIPE,
1204		[WA_XFER_STATUS_DATA_BUFFER_ERROR] = 	-ENOBUFS,
1205		[WA_XFER_STATUS_BABBLE] = 		-EOVERFLOW,
1206		[WA_XFER_RESERVED] = 			EINVAL,
1207		[WA_XFER_STATUS_NOT_FOUND] =		0,
1208		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1209		[WA_XFER_STATUS_TRANSACTION_ERROR] = 	-EILSEQ,
1210		[WA_XFER_STATUS_ABORTED] = 		-EINTR,
1211		[WA_XFER_STATUS_RPIPE_NOT_READY] = 	EINVAL,
1212		[WA_XFER_INVALID_FORMAT] = 		EINVAL,
1213		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = 	EINVAL,
1214		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = 	EINVAL,
1215	};
1216	status &= 0x3f;
1217
1218	if (status == 0)
1219		return 0;
1220	if (status >= ARRAY_SIZE(xlat)) {
1221		printk_ratelimited(KERN_ERR "%s(): BUG? "
1222			       "Unknown WA transfer status 0x%02x\n",
1223			       __func__, real_status);
1224		return -EINVAL;
1225	}
1226	errno = xlat[status];
1227	if (unlikely(errno > 0)) {
1228		printk_ratelimited(KERN_ERR "%s(): BUG? "
1229			       "Inconsistent WA status: 0x%02x\n",
1230			       __func__, real_status);
1231		errno = -errno;
1232	}
1233	return errno;
1234}
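/*
 * Illustrative sketch only: how the DTI path below consumes this
 * translation -- a set high bit in bTransferStatus marks a failed
 * segment, and the low six bits are what gets translated.
 */
#if 0	/* usage sketch, not compiled */
	if (xfer_result->bTransferStatus & 0x80)
		seg->result = wa_xfer_status_to_errno(xfer_result->bTransferStatus);
#endif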
1235
1236/*
1237 * Process a xfer result completion message
1238 *
1239 * inbound transfers: need to schedule a DTI read
1240 *
1241 * FIXME: this function needs to be broken up into parts
1242 */
1243static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1244{
1245	int result;
1246	struct device *dev = &wa->usb_iface->dev;
1247	unsigned long flags;
1248	u8 seg_idx;
1249	struct wa_seg *seg;
1250	struct wa_rpipe *rpipe;
1251	struct wa_xfer_result *xfer_result = wa->xfer_result;
1252	u8 done = 0;
1253	u8 usb_status;
1254	unsigned rpipe_ready = 0;
1255
1256	spin_lock_irqsave(&xfer->lock, flags);
1257	seg_idx = xfer_result->bTransferSegment & 0x7f;
1258	if (unlikely(seg_idx >= xfer->segs))
1259		goto error_bad_seg;
1260	seg = xfer->seg[seg_idx];
1261	rpipe = xfer->ep->hcpriv;
1262	usb_status = xfer_result->bTransferStatus;
1263	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1264		xfer, seg_idx, usb_status, seg->status);
1265	if (seg->status == WA_SEG_ABORTED
1266	    || seg->status == WA_SEG_ERROR)	/* already handled */
1267		goto segment_aborted;
1268	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
1269		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
1270	if (seg->status != WA_SEG_PENDING) {
1271		if (printk_ratelimit())
1272			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1273				xfer, seg_idx, seg->status);
1274		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
1275	}
1276	if (usb_status & 0x80) {
1277		seg->result = wa_xfer_status_to_errno(usb_status);
1278		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1279			xfer, seg->index, usb_status);
1280		goto error_complete;
1281	}
1282	/* FIXME: we ignore warnings, tally them for stats */
1283	if (usb_status & 0x40) 		/* Warning?... */
1284		usb_status = 0;		/* ... pass */
1285	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
1286		seg->status = WA_SEG_DTI_PENDING;
1287		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1288		if (xfer->is_dma) {
1289			wa->buf_in_urb->transfer_dma =
1290				xfer->urb->transfer_dma
1291				+ seg_idx * xfer->seg_size;
1292			wa->buf_in_urb->transfer_flags
1293				|= URB_NO_TRANSFER_DMA_MAP;
1294		} else {
1295			wa->buf_in_urb->transfer_buffer =
1296				xfer->urb->transfer_buffer
1297				+ seg_idx * xfer->seg_size;
1298			wa->buf_in_urb->transfer_flags
1299				&= ~URB_NO_TRANSFER_DMA_MAP;
1300		}
1301		wa->buf_in_urb->transfer_buffer_length =
1302			le32_to_cpu(xfer_result->dwTransferLength);
1303		wa->buf_in_urb->context = seg;
1304		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1305		if (result < 0)
1306			goto error_submit_buf_in;
1307	} else {
1308		/* OUT data phase, complete it -- */
1309		seg->status = WA_SEG_DONE;
1310		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1311		xfer->segs_done++;
1312		rpipe_ready = rpipe_avail_inc(rpipe);
1313		done = __wa_xfer_is_done(xfer);
1314	}
1315	spin_unlock_irqrestore(&xfer->lock, flags);
1316	if (done)
1317		wa_xfer_completion(xfer);
1318	if (rpipe_ready)
1319		wa_xfer_delayed_run(rpipe);
1320	return;
1321
1322error_submit_buf_in:
1323	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1324		dev_err(dev, "DTI: URB max acceptable errors "
1325			"exceeded, resetting device\n");
1326		wa_reset_all(wa);
1327	}
1328	if (printk_ratelimit())
1329		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1330			xfer, seg_idx, result);
1331	seg->result = result;
1332error_complete:
1333	seg->status = WA_SEG_ERROR;
1334	xfer->segs_done++;
1335	rpipe_ready = rpipe_avail_inc(rpipe);
1336	__wa_xfer_abort(xfer);
1337	done = __wa_xfer_is_done(xfer);
1338	spin_unlock_irqrestore(&xfer->lock, flags);
1339	if (done)
1340		wa_xfer_completion(xfer);
1341	if (rpipe_ready)
1342		wa_xfer_delayed_run(rpipe);
1343	return;
1344
1345error_bad_seg:
1346	spin_unlock_irqrestore(&xfer->lock, flags);
1347	wa_urb_dequeue(wa, xfer->urb);
1348	if (printk_ratelimit())
1349		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1350	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1351		dev_err(dev, "DTI: URB max acceptable errors "
1352			"exceeded, resetting device\n");
1353		wa_reset_all(wa);
1354	}
1355	return;
1356
1357segment_aborted:
1358	/* nothing to do, as the aborter did the completion */
1359	spin_unlock_irqrestore(&xfer->lock, flags);
1360}
1361
1362/*
1363 * Callback for the IN data phase
1364 *
1365 * If successful, transition state; otherwise, take a note of the
1366 * error, mark this segment done and try completion.
1367 *
1368 * Note we don't access until we are sure that the transfer hasn't
1369 * been cancelled (ECONNRESET, ENOENT), which could mean that
1370 * seg->xfer could be already gone.
1371 */
1372static void wa_buf_in_cb(struct urb *urb)
1373{
1374	struct wa_seg *seg = urb->context;
1375	struct wa_xfer *xfer = seg->xfer;
1376	struct wahc *wa;
1377	struct device *dev;
1378	struct wa_rpipe *rpipe;
1379	unsigned rpipe_ready;
1380	unsigned long flags;
1381	u8 done = 0;
1382
1383	switch (urb->status) {
1384	case 0:
1385		spin_lock_irqsave(&xfer->lock, flags);
1386		wa = xfer->wa;
1387		dev = &wa->usb_iface->dev;
1388		rpipe = xfer->ep->hcpriv;
1389		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
1390			xfer, seg->index, (size_t)urb->actual_length);
1391		seg->status = WA_SEG_DONE;
1392		seg->result = urb->actual_length;
1393		xfer->segs_done++;
1394		rpipe_ready = rpipe_avail_inc(rpipe);
1395		done = __wa_xfer_is_done(xfer);
1396		spin_unlock_irqrestore(&xfer->lock, flags);
1397		if (done)
1398			wa_xfer_completion(xfer);
1399		if (rpipe_ready)
1400			wa_xfer_delayed_run(rpipe);
1401		break;
1402	case -ECONNRESET:	/* URB unlinked; no need to do anything */
1403	case -ENOENT:		/* as it was done by whoever unlinked us */
1404		break;
1405	default:		/* Other errors ... */
1406		spin_lock_irqsave(&xfer->lock, flags);
1407		wa = xfer->wa;
1408		dev = &wa->usb_iface->dev;
1409		rpipe = xfer->ep->hcpriv;
1410		if (printk_ratelimit())
1411			dev_err(dev, "xfer %p#%u: data in error %d\n",
1412				xfer, seg->index, urb->status);
1413		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1414			    EDC_ERROR_TIMEFRAME)){
1415			dev_err(dev, "DTO: URB max acceptable errors "
1416				"exceeded, resetting device\n");
1417			wa_reset_all(wa);
1418		}
1419		seg->status = WA_SEG_ERROR;
1420		seg->result = urb->status;
1421		xfer->segs_done++;
1422		rpipe_ready = rpipe_avail_inc(rpipe);
1423		__wa_xfer_abort(xfer);
1424		done = __wa_xfer_is_done(xfer);
1425		spin_unlock_irqrestore(&xfer->lock, flags);
1426		if (done)
1427			wa_xfer_completion(xfer);
1428		if (rpipe_ready)
1429			wa_xfer_delayed_run(rpipe);
1430	}
1431}
1432
1433/*
1434 * Handle an incoming transfer result buffer
1435 *
1436 * Given a transfer result buffer, it completes the transfer (possibly
1437 * scheduling a buffer-in read) and then resubmits the DTI URB for a
1438 * new transfer result read.
1439 *
1440 *
1441 * The xfer_result DTI URB state machine
1442 *
1443 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1444 *
1445 * We start in OFF mode, the first xfer_result notification [through
1446 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1447 * read.
1448 *
1449 * We receive a buffer -- if it is not a xfer_result, we complain and
1450 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
1451 * request accounting. If it is an IN segment, we move to RBI and post
1452 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1453 * repost the DTI-URB and move to RXR state. If there was no IN
1454 * segment, it will repost the DTI-URB.
1455 *
1456 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
1457 * errors) in the URBs.
1458 */
1459static void wa_xfer_result_cb(struct urb *urb)
1460{
1461	int result;
1462	struct wahc *wa = urb->context;
1463	struct device *dev = &wa->usb_iface->dev;
1464	struct wa_xfer_result *xfer_result;
1465	u32 xfer_id;
1466	struct wa_xfer *xfer;
1467	u8 usb_status;
1468
1469	BUG_ON(wa->dti_urb != urb);
1470	switch (wa->dti_urb->status) {
1471	case 0:
1472		/* We have a xfer result buffer; check it */
1473		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
1474			urb->actual_length, urb->transfer_buffer);
1475		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1476			dev_err(dev, "DTI Error: xfer result--bad size "
1477				"xfer result (%d bytes vs %zu needed)\n",
1478				urb->actual_length, sizeof(*xfer_result));
1479			break;
1480		}
1481		xfer_result = wa->xfer_result;
1482		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1483			dev_err(dev, "DTI Error: xfer result--"
1484				"bad header length %u\n",
1485				xfer_result->hdr.bLength);
1486			break;
1487		}
1488		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1489			dev_err(dev, "DTI Error: xfer result--"
1490				"bad header type 0x%02x\n",
1491				xfer_result->hdr.bNotifyType);
1492			break;
1493		}
1494		usb_status = xfer_result->bTransferStatus & 0x3f;
1495		if (usb_status == WA_XFER_STATUS_ABORTED
1496		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
1497			/* taken care of already */
1498			break;
1499		xfer_id = xfer_result->dwTransferID;
1500		xfer = wa_xfer_get_by_id(wa, xfer_id);
1501		if (xfer == NULL) {
1502			/* FIXME: transaction might have been cancelled */
1503			dev_err(dev, "DTI Error: xfer result--"
1504				"unknown xfer 0x%08x (status 0x%02x)\n",
1505				xfer_id, usb_status);
1506			break;
1507		}
1508		wa_xfer_result_chew(wa, xfer);
1509		wa_xfer_put(xfer);
1510		break;
1511	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
1512	case -ESHUTDOWN:	/* going away! */
1513		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1514		goto out;
1515	default:
1516		/* Unknown error */
1517		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1518			    EDC_ERROR_TIMEFRAME)) {
1519			dev_err(dev, "DTI: URB max acceptable errors "
1520				"exceeded, resetting device\n");
1521			wa_reset_all(wa);
1522			goto out;
1523		}
1524		if (printk_ratelimit())
1525			dev_err(dev, "DTI: URB error %d\n", urb->status);
1526		break;
1527	}
1528	/* Resubmit the DTI URB */
1529	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1530	if (result < 0) {
1531		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1532			"resetting\n", result);
1533		wa_reset_all(wa);
1534	}
1535out:
1536	return;
1537}
1538
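/*
 * Illustrative sketch only: the three DTI states described in the
 * comment above wa_xfer_result_cb(), written out as an enum. The
 * driver keeps this state implicitly (via wa->dti_urb and which URB is
 * in flight) rather than in an explicit variable.
 */
enum example_dti_state {
	EXAMPLE_DTI_OFF,	/* no DTI URB posted yet */
	EXAMPLE_DTI_RXR,	/* DTI URB posted, waiting for a transfer result */
	EXAMPLE_DTI_RBI,	/* buf_in URB posted, reading an IN segment's data */
};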
1539/*
1540 * Transfer complete notification
1541 *
1542 * Called from the notif.c code. We get a notification on EP2 saying
1543 * that some endpoint has some transfer result data available. We are
1544 * about to read it.
1545 *
1546 * To speed up things, we always have a URB reading the DTI URB; we
1547 * don't really set it up and start it until the first xfer complete
1548 * notification arrives, which is what we do here.
1549 *
1550 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1551 * machine starts.
1552 *
1553 * So here we just initialize the DTI URB for reading transfer result
1554 * notifications and also the buffer-in URB, for reading buffers. Then
1555 * we just submit the DTI URB.
1556 *
1557 * @wa shall be referenced
1558 */
1559void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1560{
1561	int result;
1562	struct device *dev = &wa->usb_iface->dev;
1563	struct wa_notif_xfer *notif_xfer;
1564	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1565
1566	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1567	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1568
1569	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1570		/* FIXME: hardcoded limitation, adapt */
1571		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1572			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1573		goto error;
1574	}
1575	if (wa->dti_urb != NULL)	/* DTI URB already started */
1576		goto out;
1577
1578	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1579	if (wa->dti_urb == NULL) {
1580		dev_err(dev, "Can't allocate DTI URB\n");
1581		goto error_dti_urb_alloc;
1582	}
1583	usb_fill_bulk_urb(
1584		wa->dti_urb, wa->usb_dev,
1585		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1586		wa->xfer_result, wa->xfer_result_size,
1587		wa_xfer_result_cb, wa);
1588
1589	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1590	if (wa->buf_in_urb == NULL) {
1591		dev_err(dev, "Can't allocate BUF-IN URB\n");
1592		goto error_buf_in_urb_alloc;
1593	}
1594	usb_fill_bulk_urb(
1595		wa->buf_in_urb, wa->usb_dev,
1596		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1597		NULL, 0, wa_buf_in_cb, wa);
1598	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1599	if (result < 0) {
1600		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1601			"resetting\n", result);
1602		goto error_dti_urb_submit;
1603	}
1604out:
1605	return;
1606
1607error_dti_urb_submit:
1608	usb_put_urb(wa->buf_in_urb);
1609error_buf_in_urb_alloc:
1610	usb_put_urb(wa->dti_urb);
1611	wa->dti_urb = NULL;
1612error_dti_urb_alloc:
1613error:
1614	wa_reset_all(wa);
1615}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * WUSB Wire Adapter
   4 * Data transfer and URB enqueing
   5 *
   6 * Copyright (C) 2005-2006 Intel Corporation
   7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
   8 *
  67 */
  68#include <linux/spinlock.h>
  69#include <linux/slab.h>
  70#include <linux/hash.h>
  71#include <linux/ratelimit.h>
  72#include <linux/export.h>
  73#include <linux/scatterlist.h>
  74
  75#include "wa-hc.h"
  76#include "wusbhc.h"
  77
  78enum {
  79	/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
  80	WA_SEGS_MAX = 128,
  81};
  82
  83enum wa_seg_status {
  84	WA_SEG_NOTREADY,
  85	WA_SEG_READY,
  86	WA_SEG_DELAYED,
  87	WA_SEG_SUBMITTED,
  88	WA_SEG_PENDING,
  89	WA_SEG_DTI_PENDING,
  90	WA_SEG_DONE,
  91	WA_SEG_ERROR,
  92	WA_SEG_ABORTED,
  93};
  94
  95static void wa_xfer_delayed_run(struct wa_rpipe *);
  96static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
  97
  98/*
  99 * Life cycle governed by 'struct urb' (the refcount of the struct is
 100 * that of the 'struct urb' and usb_free_urb() would free the whole
 101 * struct).
 102 */
 103struct wa_seg {
 104	struct urb tr_urb;		/* transfer request urb. */
 105	struct urb *isoc_pack_desc_urb;	/* for isoc packet descriptor. */
 106	struct urb *dto_urb;		/* for data output. */
 107	struct list_head list_node;	/* for rpipe->req_list */
 108	struct wa_xfer *xfer;		/* out xfer */
 109	u8 index;			/* which segment we are */
 110	int isoc_frame_count;	/* number of isoc frames in this segment. */
 111	int isoc_frame_offset;	/* starting frame offset in the xfer URB. */
 112	/* Isoc frame that the current transfer buffer corresponds to. */
 113	int isoc_frame_index;
 114	int isoc_size;	/* size of all isoc frames sent by this seg. */
 115	enum wa_seg_status status;
 116	ssize_t result;			/* bytes xfered or error */
 117	struct wa_xfer_hdr xfer_hdr;
 118};
 119
 120static inline void wa_seg_init(struct wa_seg *seg)
 121{
 122	usb_init_urb(&seg->tr_urb);
 123
 124	/* set the remaining memory to 0. */
 125	memset(((void *)seg) + sizeof(seg->tr_urb), 0,
 126		sizeof(*seg) - sizeof(seg->tr_urb));
 127}
 128
 129/*
 130 * Protected by xfer->lock
 131 *
 132 */
 133struct wa_xfer {
 134	struct kref refcnt;
 135	struct list_head list_node;
 136	spinlock_t lock;
 137	u32 id;
 138
 139	struct wahc *wa;		/* Wire adapter we are plugged to */
 140	struct usb_host_endpoint *ep;
 141	struct urb *urb;		/* URB we are transferring for */
 142	struct wa_seg **seg;		/* transfer segments */
 143	u8 segs, segs_submitted, segs_done;
 144	unsigned is_inbound:1;
 145	unsigned is_dma:1;
 146	size_t seg_size;
 147	int result;
 148
 149	gfp_t gfp;			/* allocation mask */
 150
 151	struct wusb_dev *wusb_dev;	/* for activity timestamps */
 152};
 153
 154static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
 155	struct wa_seg *seg, int curr_iso_frame);
 156static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
 157		int starting_index, enum wa_seg_status status);
 158
 159static inline void wa_xfer_init(struct wa_xfer *xfer)
 160{
 161	kref_init(&xfer->refcnt);
 162	INIT_LIST_HEAD(&xfer->list_node);
 163	spin_lock_init(&xfer->lock);
 164}
 165
 166/*
 167 * Destroy a transfer structure
 168 *
 169 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 170 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 171 */
 172static void wa_xfer_destroy(struct kref *_xfer)
 173{
 174	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
 175	if (xfer->seg) {
 176		unsigned cnt;
 177		for (cnt = 0; cnt < xfer->segs; cnt++) {
 178			struct wa_seg *seg = xfer->seg[cnt];
 179			if (seg) {
 180				usb_free_urb(seg->isoc_pack_desc_urb);
 181				if (seg->dto_urb) {
 182					kfree(seg->dto_urb->sg);
 183					usb_free_urb(seg->dto_urb);
 184				}
 185				usb_free_urb(&seg->tr_urb);
 186			}
 187		}
 188		kfree(xfer->seg);
 189	}
 190	kfree(xfer);
 191}
 192
 193static void wa_xfer_get(struct wa_xfer *xfer)
 194{
 195	kref_get(&xfer->refcnt);
 196}
 197
 198static void wa_xfer_put(struct wa_xfer *xfer)
 199{
 200	kref_put(&xfer->refcnt, wa_xfer_destroy);
 201}
 202
 203/*
 204 * Try to get exclusive access to the DTO endpoint resource.  Return true
 205 * if successful.
 206 */
 207static inline int __wa_dto_try_get(struct wahc *wa)
 208{
 209	return (test_and_set_bit(0, &wa->dto_in_use) == 0);
 210}
 211
 212/* Release the DTO endpoint resource. */
 213static inline void __wa_dto_put(struct wahc *wa)
 214{
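	/*
	 * clear_bit_unlock() pairs with the test_and_set_bit() in
	 * __wa_dto_try_get(): it releases the DTO resource with release
	 * ordering, so DTO traffic issued while holding it is visible
	 * before the next owner takes it.
	 */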
 215	clear_bit_unlock(0, &wa->dto_in_use);
 216}
 217
 218/* Service RPIPEs that are waiting on the DTO resource. */
 219static void wa_check_for_delayed_rpipes(struct wahc *wa)
 220{
 221	unsigned long flags;
 222	int dto_waiting = 0;
 223	struct wa_rpipe *rpipe;
 224
 225	spin_lock_irqsave(&wa->rpipe_lock, flags);
 226	while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
 227		rpipe = list_first_entry(&wa->rpipe_delayed_list,
 228				struct wa_rpipe, list_node);
 229		__wa_xfer_delayed_run(rpipe, &dto_waiting);
 230		/* remove this RPIPE from the list if it is not waiting. */
 231		if (!dto_waiting) {
 232			pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
 233				__func__,
 234				le16_to_cpu(rpipe->descr.wRPipeIndex));
 235			list_del_init(&rpipe->list_node);
 236		}
 237	}
 238	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
 239}
 240
 241/* add this RPIPE to the end of the delayed RPIPE list. */
 242static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
 243{
 244	unsigned long flags;
 245
 246	spin_lock_irqsave(&wa->rpipe_lock, flags);
 247	/* add rpipe to the list if it is not already on it. */
 248	if (list_empty(&rpipe->list_node)) {
 249		pr_debug("%s: adding RPIPE %d to the delayed list.\n",
 250			__func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
 251		list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
 252	}
 253	spin_unlock_irqrestore(&wa->rpipe_lock, flags);
 254}
 255
 256/*
 257 * xfer is referenced
 258 *
 259 * xfer->lock has to be unlocked
 260 *
 261 * We take xfer->lock for setting the result; this is a barrier
 262 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 263 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 264 * reference to the transfer.
 265 */
 266static void wa_xfer_giveback(struct wa_xfer *xfer)
 267{
 268	unsigned long flags;
 269
 270	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
 271	list_del_init(&xfer->list_node);
 272	usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
 273	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
 274	/* FIXME: segmentation broken -- kills DWA */
 275	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
 276	wa_put(xfer->wa);
 277	wa_xfer_put(xfer);
 278}
 279
 280/*
 281 * xfer is referenced
 282 *
 283 * xfer->lock has to be unlocked
 284 */
 285static void wa_xfer_completion(struct wa_xfer *xfer)
 286{
 287	if (xfer->wusb_dev)
 288		wusb_dev_put(xfer->wusb_dev);
 289	rpipe_put(xfer->ep->hcpriv);
 290	wa_xfer_giveback(xfer);
 291}
 292
 293/*
 294 * Initialize a transfer's ID
 295 *
 296 * We need to use a sequential number; if we use the pointer or the
 297 * hash of the pointer, it can repeat over sequential transfers and
 298 * then it will confuse the HWA....wonder why in hell they put a 32
 299 * bit handle in there then.
 300 */
 301static void wa_xfer_id_init(struct wa_xfer *xfer)
 302{
 303	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
 304}
 305
 306/* Return the xfer's ID. */
 307static inline u32 wa_xfer_id(struct wa_xfer *xfer)
 308{
 309	return xfer->id;
 310}
 311
 312/* Return the xfer's ID in transport format (little endian). */
 313static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
 314{
 315	return cpu_to_le32(xfer->id);
 316}
 317
 318/*
 319 * If transfer is done, wrap it up and return true
 320 *
 321 * xfer->lock has to be locked
 322 */
 323static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
 324{
 325	struct device *dev = &xfer->wa->usb_iface->dev;
 326	unsigned result, cnt;
 327	struct wa_seg *seg;
 328	struct urb *urb = xfer->urb;
 329	unsigned found_short = 0;
 330
 331	result = xfer->segs_done == xfer->segs_submitted;
 332	if (result == 0)
 333		goto out;
 334	urb->actual_length = 0;
 335	for (cnt = 0; cnt < xfer->segs; cnt++) {
 336		seg = xfer->seg[cnt];
 337		switch (seg->status) {
 338		case WA_SEG_DONE:
 339			if (found_short && seg->result > 0) {
 340				dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
 341					xfer, wa_xfer_id(xfer), cnt,
 342					seg->result);
 343				urb->status = -EINVAL;
 344				goto out;
 345			}
 346			urb->actual_length += seg->result;
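			/*
			 * A non-final segment shorter than seg_size means the
			 * device ended the transfer early; remember that so a
			 * later segment carrying data is flagged as an error
			 * by the check above.
			 */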
 347			if (!(usb_pipeisoc(xfer->urb->pipe))
 348				&& seg->result < xfer->seg_size
 349			    && cnt != xfer->segs-1)
 350				found_short = 1;
 351			dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
 352				"result %zu urb->actual_length %d\n",
 353				xfer, wa_xfer_id(xfer), seg->index, found_short,
 354				seg->result, urb->actual_length);
 355			break;
 356		case WA_SEG_ERROR:
 357			xfer->result = seg->result;
 358			dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
 359				xfer, wa_xfer_id(xfer), seg->index, seg->result,
 360				seg->result);
 361			goto out;
 362		case WA_SEG_ABORTED:
 363			xfer->result = seg->result;
 364			dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
 365				xfer, wa_xfer_id(xfer), seg->index, seg->result,
 366				seg->result);
 367			goto out;
 368		default:
 369			dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
 370				 xfer, wa_xfer_id(xfer), cnt, seg->status);
 371			xfer->result = -EINVAL;
 372			goto out;
 373		}
 374	}
 375	xfer->result = 0;
 376out:
 377	return result;
 378}
 379
 380/*
 381 * Mark the given segment as done.  Return true if this completes the xfer.
 382 * This should only be called for segs that have been submitted to an RPIPE.
 383 * Delayed segs are not marked as submitted so they do not need to be marked
 384 * as done when cleaning up.
 385 *
 386 * xfer->lock has to be locked
 387 */
 388static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
 389	struct wa_seg *seg, enum wa_seg_status status)
 390{
 391	seg->status = status;
 392	xfer->segs_done++;
 393
 394	/* check for done. */
 395	return __wa_xfer_is_done(xfer);
 396}
 397
 398/*
 399 * Search the WA's transfer list for the transfer with the given ID and
 400 * take a reference to it.  The ID is the sequential number assigned by
 401 * wa_xfer_id_init(), not a pointer or a hash of one (see the note
 402 * there).
 403 *
 404 * @returns NULL if not found.
 405 */
 406static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
 407{
 408	unsigned long flags;
 409	struct wa_xfer *xfer_itr;
 410	spin_lock_irqsave(&wa->xfer_list_lock, flags);
 411	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
 412		if (id == xfer_itr->id) {
 413			wa_xfer_get(xfer_itr);
 414			goto out;
 415		}
 416	}
 417	xfer_itr = NULL;
 418out:
 419	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
 420	return xfer_itr;
 421}
 422
 423struct wa_xfer_abort_buffer {
 424	struct urb urb;
 425	struct wahc *wa;
 426	struct wa_xfer_abort cmd;
 427};
 428
 429static void __wa_xfer_abort_cb(struct urb *urb)
 430{
 431	struct wa_xfer_abort_buffer *b = urb->context;
 432	struct wahc *wa = b->wa;
 433
 434	/*
 435	 * If the abort request URB failed, then the HWA did not get the abort
 436	 * command.  Forcibly clean up the xfer without waiting for a Transfer
 437	 * Result from the HWA.
 438	 */
 439	if (urb->status < 0) {
 440		struct wa_xfer *xfer;
 441		struct device *dev = &wa->usb_iface->dev;
 442
 443		xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
 444		dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
 445			__func__, urb->status);
 446		if (xfer) {
 447			unsigned long flags;
 448			int done, seg_index = 0;
 449			struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 450
 451			dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
 452				__func__, xfer, wa_xfer_id(xfer));
 453			spin_lock_irqsave(&xfer->lock, flags);
 454			/* skip done segs. */
 455			while (seg_index < xfer->segs) {
 456				struct wa_seg *seg = xfer->seg[seg_index];
 457
 458				if ((seg->status == WA_SEG_DONE) ||
 459					(seg->status == WA_SEG_ERROR)) {
 460					++seg_index;
 461				} else {
 462					break;
 463				}
 464			}
 465			/* mark remaining segs as aborted. */
 466			wa_complete_remaining_xfer_segs(xfer, seg_index,
 467				WA_SEG_ABORTED);
 468			done = __wa_xfer_is_done(xfer);
 469			spin_unlock_irqrestore(&xfer->lock, flags);
 470			if (done)
 471				wa_xfer_completion(xfer);
 472			wa_xfer_delayed_run(rpipe);
 473			wa_xfer_put(xfer);
 474		} else {
 475			dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
 476				 __func__, le32_to_cpu(b->cmd.dwTransferID));
 477		}
 478	}
 479
 480	wa_put(wa);	/* taken in __wa_xfer_abort */
 481	usb_put_urb(&b->urb);
 482}
 483
 484/*
 485 * Aborts an ongoing transaction
 486 *
 487 * Assumes the transfer is referenced and locked and in a submitted
 488 * state (mainly that there is an endpoint/rpipe assigned).
 489 *
 490 * The callback (see above) does nothing but freeing up the data by
 491 * putting the URB. Because the URB is allocated at the head of the
 492 * struct, the whole space we allocated is kfreed.
 493 */
 494static int __wa_xfer_abort(struct wa_xfer *xfer)
 495{
 496	int result = -ENOMEM;
 497	struct device *dev = &xfer->wa->usb_iface->dev;
 498	struct wa_xfer_abort_buffer *b;
 499	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 500
 501	b = kmalloc(sizeof(*b), GFP_ATOMIC);
 502	if (b == NULL)
 503		goto error_kmalloc;
 504	b->cmd.bLength =  sizeof(b->cmd);
 505	b->cmd.bRequestType = WA_XFER_ABORT;
 506	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
 507	b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
 508	b->wa = wa_get(xfer->wa);
 509
 510	usb_init_urb(&b->urb);
 511	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
 512		usb_sndbulkpipe(xfer->wa->usb_dev,
 513				xfer->wa->dto_epd->bEndpointAddress),
 514		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
 515	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
 516	if (result < 0)
 517		goto error_submit;
 518	return result;				/* callback frees! */
 519
 520
 521error_submit:
 522	wa_put(xfer->wa);
 523	if (printk_ratelimit())
 524		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
 525			xfer, result);
 526	kfree(b);
 527error_kmalloc:
 528	return result;
 529
 530}
 531
 532/*
 533 * Calculate the number of isoc frames starting from isoc_frame_offset
 534 * that will fit in a transfer segment.
 535 */
 536static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
 537	int isoc_frame_offset, int *total_size)
 538{
 539	int segment_size = 0, frame_count = 0;
 540	int index = isoc_frame_offset;
 541	struct usb_iso_packet_descriptor *iso_frame_desc =
 542		xfer->urb->iso_frame_desc;
 543
 544	while ((index < xfer->urb->number_of_packets)
 545		&& ((segment_size + iso_frame_desc[index].length)
 546				<= xfer->seg_size)) {
 547		/*
 548		 * For Alereon HWA devices, only include an isoc frame in an
 549		 * out segment if it is physically contiguous with the previous
 550		 * frame.  This is required because those devices expect
 551		 * the isoc frames to be sent as a single USB transaction as
 552		 * opposed to one transaction per frame with standard HWA.
 553		 */
 554		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
 555			&& (xfer->is_inbound == 0)
 556			&& (index > isoc_frame_offset)
 557			&& ((iso_frame_desc[index - 1].offset +
 558				iso_frame_desc[index - 1].length) !=
 559				iso_frame_desc[index].offset))
 560			break;
 561
 562		/* this frame fits. count it. */
 563		++frame_count;
 564		segment_size += iso_frame_desc[index].length;
 565
 566		/* move to the next isoc frame. */
 567		++index;
 568	}
 569
 570	*total_size = segment_size;
 571	return frame_count;
 572}
 573
 574/*
 575 * Compute the transfer request type, segment size and number of segments.
 576 * @returns < 0 on error, transfer segment request size if ok
 577 */
 578static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 579				     enum wa_xfer_type *pxfer_type)
 580{
 581	ssize_t result;
 582	struct device *dev = &xfer->wa->usb_iface->dev;
 583	size_t maxpktsize;
 584	struct urb *urb = xfer->urb;
 585	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 586
 587	switch (rpipe->descr.bmAttribute & 0x3) {
 588	case USB_ENDPOINT_XFER_CONTROL:
 589		*pxfer_type = WA_XFER_TYPE_CTL;
 590		result = sizeof(struct wa_xfer_ctl);
 591		break;
 592	case USB_ENDPOINT_XFER_INT:
 593	case USB_ENDPOINT_XFER_BULK:
 594		*pxfer_type = WA_XFER_TYPE_BI;
 595		result = sizeof(struct wa_xfer_bi);
 596		break;
 597	case USB_ENDPOINT_XFER_ISOC:
 598		*pxfer_type = WA_XFER_TYPE_ISO;
 599		result = sizeof(struct wa_xfer_hwaiso);
 600		break;
 601	default:
 602		/* never happens */
 603		BUG();
 604		result = -EINVAL;	/* shut gcc up */
 605	}
 606	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
 607	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
 608
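	/*
	 * Segment size = number of blocks assigned to the RPIPE times the
	 * wire adapter's block size of 2^(bRPipeBlockSize - 1) bytes; e.g.
	 * (illustrative values) wBlocks = 4 and bRPipeBlockSize = 8 give a
	 * 4 * 128 = 512 byte segment.
	 */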
 609	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
 610	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
 611		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
 612	/* Compute the segment size and make sure it is a multiple of
 613	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
 614	 * a check (FIXME) */
 615	if (xfer->seg_size < maxpktsize) {
 616		dev_err(dev,
 617			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
 618			xfer->seg_size, maxpktsize);
 619		result = -EINVAL;
 620		goto error;
 621	}
 622	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
 623	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
 624		int index = 0;
 625
 626		xfer->segs = 0;
 627		/*
 628		 * loop over urb->number_of_packets to determine how many
 629		 * xfer segments will be needed to send the isoc frames.
 630		 */
 631		while (index < urb->number_of_packets) {
 632			int seg_size; /* don't care. */
 633			index += __wa_seg_calculate_isoc_frame_count(xfer,
 634					index, &seg_size);
 635			++xfer->segs;
 636		}
 637	} else {
 638		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
 639						xfer->seg_size);
 640		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
 641			xfer->segs = 1;
 642	}
 643
 644	if (xfer->segs > WA_SEGS_MAX) {
 645		dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
 646			(urb->transfer_buffer_length/xfer->seg_size),
 647			WA_SEGS_MAX);
 648		result = -EINVAL;
 649		goto error;
 650	}
 651error:
 652	return result;
 653}
 654
 655static void __wa_setup_isoc_packet_descr(
 656		struct wa_xfer_packet_info_hwaiso *packet_desc,
 657		struct wa_xfer *xfer,
 658		struct wa_seg *seg) {
 659	struct usb_iso_packet_descriptor *iso_frame_desc =
 660		xfer->urb->iso_frame_desc;
 661	int frame_index;
 662
 663	/* populate isoc packet descriptor. */
 664	packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
 665	packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
 666		(sizeof(packet_desc->PacketLength[0]) *
 667			seg->isoc_frame_count));
 668	for (frame_index = 0; frame_index < seg->isoc_frame_count;
 669		++frame_index) {
 670		int offset_index = frame_index + seg->isoc_frame_offset;
 671		packet_desc->PacketLength[frame_index] =
 672			cpu_to_le16(iso_frame_desc[offset_index].length);
 673	}
 674}
 675
 676
 677/* Fill in the common request header and xfer-type specific data. */
 678static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
 679				 struct wa_xfer_hdr *xfer_hdr0,
 680				 enum wa_xfer_type xfer_type,
 681				 size_t xfer_hdr_size)
 682{
 683	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
 684	struct wa_seg *seg = xfer->seg[0];
 685
 686	xfer_hdr0 = &seg->xfer_hdr;
 687	xfer_hdr0->bLength = xfer_hdr_size;
 688	xfer_hdr0->bRequestType = xfer_type;
 689	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
 690	xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
 691	xfer_hdr0->bTransferSegment = 0;
 692	switch (xfer_type) {
 693	case WA_XFER_TYPE_CTL: {
 694		struct wa_xfer_ctl *xfer_ctl =
 695			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
 696		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
 697		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
 698		       sizeof(xfer_ctl->baSetupData));
 699		break;
 700	}
 701	case WA_XFER_TYPE_BI:
 702		break;
 703	case WA_XFER_TYPE_ISO: {
 704		struct wa_xfer_hwaiso *xfer_iso =
 705			container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
 706		struct wa_xfer_packet_info_hwaiso *packet_desc =
 707			((void *)xfer_iso) + xfer_hdr_size;
 708
 709		/* populate the isoc section of the transfer request. */
 710		xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
 711		/* populate isoc packet descriptor. */
 712		__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
 713		break;
 714	}
 715	default:
 716		BUG();
 717	}
 718}
 719
 720/*
 721 * Callback for the OUT data phase of the segment request
 722 *
 723 * Check wa_seg_tr_cb(); most comments also apply here because this
 724 * function does almost the same thing and they work closely
 725 * together.
 726 *
 727 * If the seg request has failed but this DTO phase has succeeded,
 728 * wa_seg_tr_cb() has already failed the segment and moved the
 729 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 730 * effectively do nothing.
 731 */
 732static void wa_seg_dto_cb(struct urb *urb)
 733{
 734	struct wa_seg *seg = urb->context;
 735	struct wa_xfer *xfer = seg->xfer;
 736	struct wahc *wa;
 737	struct device *dev;
 738	struct wa_rpipe *rpipe;
 739	unsigned long flags;
 740	unsigned rpipe_ready = 0;
 741	int data_send_done = 1, release_dto = 0, holding_dto = 0;
 742	u8 done = 0;
 743	int result;
 744
 745	/* free the sg if it was used. */
 746	kfree(urb->sg);
 747	urb->sg = NULL;
 748
 749	spin_lock_irqsave(&xfer->lock, flags);
 750	wa = xfer->wa;
 751	dev = &wa->usb_iface->dev;
 752	if (usb_pipeisoc(xfer->urb->pipe)) {
 753		/* Alereon HWA sends all isoc frames in a single transfer. */
 754		if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
 755			seg->isoc_frame_index += seg->isoc_frame_count;
 756		else
 757			seg->isoc_frame_index += 1;
 758		if (seg->isoc_frame_index < seg->isoc_frame_count) {
 759			data_send_done = 0;
 760			holding_dto = 1; /* checked in error cases. */
 761			/*
 762			 * if this is the last isoc frame of the segment, we
 763			 * can release DTO after sending this frame.
 764			 */
 765			if ((seg->isoc_frame_index + 1) >=
 766				seg->isoc_frame_count)
 767				release_dto = 1;
 768		}
 769		dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
 770			wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
 771			holding_dto, release_dto);
 772	}
 773	spin_unlock_irqrestore(&xfer->lock, flags);
 774
 775	switch (urb->status) {
 776	case 0:
 777		spin_lock_irqsave(&xfer->lock, flags);
 778		seg->result += urb->actual_length;
 779		if (data_send_done) {
 780			dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
 781				wa_xfer_id(xfer), seg->index, seg->result);
 782			if (seg->status < WA_SEG_PENDING)
 783				seg->status = WA_SEG_PENDING;
 784		} else {
 785			/* should only hit this for isoc xfers. */
 786			/*
 787			 * Populate the dto URB with the next isoc frame buffer,
 788			 * send the URB and release DTO if we no longer need it.
 789			 */
 790			 __wa_populate_dto_urb_isoc(xfer, seg,
 791				seg->isoc_frame_offset + seg->isoc_frame_index);
 792
 793			/* resubmit the URB with the next isoc frame. */
 794			/* take a ref on resubmit. */
 795			wa_xfer_get(xfer);
 796			result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
 797			if (result < 0) {
 798				dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
 799				       wa_xfer_id(xfer), seg->index, result);
 800				spin_unlock_irqrestore(&xfer->lock, flags);
 801				goto error_dto_submit;
 802			}
 803		}
 804		spin_unlock_irqrestore(&xfer->lock, flags);
 805		if (release_dto) {
 806			__wa_dto_put(wa);
 807			wa_check_for_delayed_rpipes(wa);
 808		}
 809		break;
 810	case -ECONNRESET:	/* URB unlinked; no need to do anything */
 811	case -ENOENT:		/* as it was done by whoever unlinked us */
 812		if (holding_dto) {
 813			__wa_dto_put(wa);
 814			wa_check_for_delayed_rpipes(wa);
 815		}
 816		break;
 817	default:		/* Other errors ... */
 818		dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
 819			wa_xfer_id(xfer), seg->index, urb->status);
 820		goto error_default;
 821	}
 822
 823	/* taken when this URB was submitted. */
 824	wa_xfer_put(xfer);
 825	return;
 826
 827error_dto_submit:
 828	/* taken on resubmit attempt. */
 829	wa_xfer_put(xfer);
 830error_default:
 831	spin_lock_irqsave(&xfer->lock, flags);
 832	rpipe = xfer->ep->hcpriv;
 833	if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 834		    EDC_ERROR_TIMEFRAME)){
 835		dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
 836		wa_reset_all(wa);
 837	}
 838	if (seg->status != WA_SEG_ERROR) {
 839		seg->result = urb->status;
 840		__wa_xfer_abort(xfer);
 841		rpipe_ready = rpipe_avail_inc(rpipe);
 842		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
 843	}
 844	spin_unlock_irqrestore(&xfer->lock, flags);
 845	if (holding_dto) {
 846		__wa_dto_put(wa);
 847		wa_check_for_delayed_rpipes(wa);
 848	}
 849	if (done)
 850		wa_xfer_completion(xfer);
 851	if (rpipe_ready)
 852		wa_xfer_delayed_run(rpipe);
 853	/* taken when this URB was submitted. */
 854	wa_xfer_put(xfer);
 855}
 856
 857/*
 858 * Callback for the isoc packet descriptor phase of the segment request
 859 *
 860 * Check wa_seg_tr_cb(); most comments also apply here because this
 861 * function does almost the same thing and they work closely
 862 * together.
 863 *
 864 * If the seg request has failed but this phase has succeeded,
 865 * wa_seg_tr_cb() has already failed the segment and moved the
 866 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 867 * effectively do nothing.
 868 */
 869static void wa_seg_iso_pack_desc_cb(struct urb *urb)
 870{
 871	struct wa_seg *seg = urb->context;
 872	struct wa_xfer *xfer = seg->xfer;
 873	struct wahc *wa;
 874	struct device *dev;
 875	struct wa_rpipe *rpipe;
 876	unsigned long flags;
 877	unsigned rpipe_ready = 0;
 878	u8 done = 0;
 879
 880	switch (urb->status) {
 881	case 0:
 882		spin_lock_irqsave(&xfer->lock, flags);
 883		wa = xfer->wa;
 884		dev = &wa->usb_iface->dev;
 885		dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
 886			wa_xfer_id(xfer), seg->index);
 887		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
 888			seg->status = WA_SEG_PENDING;
 889		spin_unlock_irqrestore(&xfer->lock, flags);
 890		break;
 891	case -ECONNRESET:	/* URB unlinked; no need to do anything */
 892	case -ENOENT:		/* as it was done by whoever unlinked us */
 893		break;
 894	default:		/* Other errors ... */
 895		spin_lock_irqsave(&xfer->lock, flags);
 896		wa = xfer->wa;
 897		dev = &wa->usb_iface->dev;
 898		rpipe = xfer->ep->hcpriv;
 899		pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
 900				wa_xfer_id(xfer), seg->index, urb->status);
 901		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 902			    EDC_ERROR_TIMEFRAME)){
 903			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
 904			wa_reset_all(wa);
 905		}
 906		if (seg->status != WA_SEG_ERROR) {
 907			usb_unlink_urb(seg->dto_urb);
 908			seg->result = urb->status;
 909			__wa_xfer_abort(xfer);
 910			rpipe_ready = rpipe_avail_inc(rpipe);
 911			done = __wa_xfer_mark_seg_as_done(xfer, seg,
 912					WA_SEG_ERROR);
 913		}
 914		spin_unlock_irqrestore(&xfer->lock, flags);
 915		if (done)
 916			wa_xfer_completion(xfer);
 917		if (rpipe_ready)
 918			wa_xfer_delayed_run(rpipe);
 919	}
 920	/* taken when this URB was submitted. */
 921	wa_xfer_put(xfer);
 922}
 923
 924/*
 925 * Callback for the segment request
 926 *
 927 * If successful transition state (unless already transitioned or
 928 * outbound transfer); otherwise, take a note of the error, mark this
 929 * segment done and try completion.
 930 *
 931 * Note we don't access seg->xfer until we are sure the transfer hasn't
 932 * been cancelled (ECONNRESET, ENOENT); if it has been,
 933 * seg->xfer could already be gone.
 934 *
 935 * We have to check before setting the status to WA_SEG_PENDING
 936 * because sometimes the xfer result callback arrives before this
 937 * callback (geeeeeeze), so it might happen that we are already in
 938 * another state. As well, we don't set it if the transfer is not inbound,
 939 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 940 * finishes.
 941 */
 942static void wa_seg_tr_cb(struct urb *urb)
 943{
 944	struct wa_seg *seg = urb->context;
 945	struct wa_xfer *xfer = seg->xfer;
 946	struct wahc *wa;
 947	struct device *dev;
 948	struct wa_rpipe *rpipe;
 949	unsigned long flags;
 950	unsigned rpipe_ready;
 951	u8 done = 0;
 952
 953	switch (urb->status) {
 954	case 0:
 955		spin_lock_irqsave(&xfer->lock, flags);
 956		wa = xfer->wa;
 957		dev = &wa->usb_iface->dev;
 958		dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
 959			xfer, wa_xfer_id(xfer), seg->index);
 960		if (xfer->is_inbound &&
 961			seg->status < WA_SEG_PENDING &&
 962			!(usb_pipeisoc(xfer->urb->pipe)))
 963			seg->status = WA_SEG_PENDING;
 964		spin_unlock_irqrestore(&xfer->lock, flags);
 965		break;
 966	case -ECONNRESET:	/* URB unlinked; no need to do anything */
 967	case -ENOENT:		/* as it was done by whoever unlinked us */
 968		break;
 969	default:		/* Other errors ... */
 970		spin_lock_irqsave(&xfer->lock, flags);
 971		wa = xfer->wa;
 972		dev = &wa->usb_iface->dev;
 973		rpipe = xfer->ep->hcpriv;
 974		if (printk_ratelimit())
 975			dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
 976				xfer, wa_xfer_id(xfer), seg->index,
 977				urb->status);
 978		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 979			    EDC_ERROR_TIMEFRAME)){
 980			dev_err(dev, "DTO: URB max acceptable errors "
 981				"exceeded, resetting device\n");
 982			wa_reset_all(wa);
 983		}
 984		usb_unlink_urb(seg->isoc_pack_desc_urb);
 985		usb_unlink_urb(seg->dto_urb);
 986		seg->result = urb->status;
 987		__wa_xfer_abort(xfer);
 988		rpipe_ready = rpipe_avail_inc(rpipe);
 989		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
 990		spin_unlock_irqrestore(&xfer->lock, flags);
 991		if (done)
 992			wa_xfer_completion(xfer);
 993		if (rpipe_ready)
 994			wa_xfer_delayed_run(rpipe);
 995	}
 996	/* taken when this URB was submitted. */
 997	wa_xfer_put(xfer);
 998}
 999
1000/*
1001 * Allocate an SG list to store bytes_to_transfer bytes and copy the
1002 * subset of the in_sg that matches the buffer subset
1003 * we are about to transfer.
1004 */
1005static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
1006	const unsigned int bytes_transferred,
1007	const unsigned int bytes_to_transfer, int *out_num_sgs)
1008{
1009	struct scatterlist *out_sg;
1010	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
1011		nents;
1012	struct scatterlist *current_xfer_sg = in_sg;
1013	struct scatterlist *current_seg_sg, *last_seg_sg;
1014
1015	/* skip previously transferred pages. */
1016	while ((current_xfer_sg) &&
1017			(bytes_processed < bytes_transferred)) {
1018		bytes_processed += current_xfer_sg->length;
1019
1020		/* advance the sg if current segment starts on or past the
1021			next page. */
1022		if (bytes_processed <= bytes_transferred)
1023			current_xfer_sg = sg_next(current_xfer_sg);
1024	}
1025
1026	/* the data for the current segment starts in current_xfer_sg.
1027		calculate the offset. */
1028	if (bytes_processed > bytes_transferred) {
1029		offset_into_current_page_data = current_xfer_sg->length -
1030			(bytes_processed - bytes_transferred);
1031	}
1032
1033	/* calculate the number of pages needed by this segment. */
1034	nents = DIV_ROUND_UP((bytes_to_transfer +
1035		offset_into_current_page_data +
1036		current_xfer_sg->offset),
1037		PAGE_SIZE);
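	/*
	 * This is a worst-case estimate used only for the allocation; the
	 * copy loop below recounts the entries that are actually used.
	 */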
1038
1039	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
1040	if (out_sg) {
1041		sg_init_table(out_sg, nents);
1042
1043		/* copy the portion of the incoming SG that correlates to the
1044		 * data to be transferred by this segment to the segment SG. */
1045		last_seg_sg = current_seg_sg = out_sg;
1046		bytes_processed = 0;
1047
1048		/* reset nents and calculate the actual number of sg entries
1049			needed. */
1050		nents = 0;
1051		while ((bytes_processed < bytes_to_transfer) &&
1052				current_seg_sg && current_xfer_sg) {
1053			unsigned int page_len = min((current_xfer_sg->length -
1054				offset_into_current_page_data),
1055				(bytes_to_transfer - bytes_processed));
1056
1057			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
1058				page_len,
1059				current_xfer_sg->offset +
1060				offset_into_current_page_data);
1061
1062			bytes_processed += page_len;
1063
1064			last_seg_sg = current_seg_sg;
1065			current_seg_sg = sg_next(current_seg_sg);
1066			current_xfer_sg = sg_next(current_xfer_sg);
1067
1068			/* only the first page may require additional offset. */
1069			offset_into_current_page_data = 0;
1070			nents++;
1071		}
1072
1073		/* update num_sgs and terminate the list since we may have
1074		 *  concatenated pages. */
1075		sg_mark_end(last_seg_sg);
1076		*out_num_sgs = nents;
1077	}
1078
1079	return out_sg;
1080}
1081
1082/*
1083 * Populate DMA buffer info for the isoc dto urb.
1084 */
1085static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
1086	struct wa_seg *seg, int curr_iso_frame)
1087{
1088	seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1089	seg->dto_urb->sg = NULL;
1090	seg->dto_urb->num_sgs = 0;
1091	/* dto urb buffer address pulled from iso_frame_desc. */
1092	seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
1093		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
1094	/* The Alereon HWA sends a single URB with all isoc segs. */
1095	if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
1096		seg->dto_urb->transfer_buffer_length = seg->isoc_size;
1097	else
1098		seg->dto_urb->transfer_buffer_length =
1099			xfer->urb->iso_frame_desc[curr_iso_frame].length;
1100}
1101
1102/*
1103 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
1104 */
1105static int __wa_populate_dto_urb(struct wa_xfer *xfer,
1106	struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
1107{
1108	int result = 0;
1109
1110	if (xfer->is_dma) {
1111		seg->dto_urb->transfer_dma =
1112			xfer->urb->transfer_dma + buf_itr_offset;
1113		seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1114		seg->dto_urb->sg = NULL;
1115		seg->dto_urb->num_sgs = 0;
1116	} else {
1117		/* do buffer or SG processing. */
1118		seg->dto_urb->transfer_flags &=
1119			~URB_NO_TRANSFER_DMA_MAP;
1120		/* this should always be 0 before a resubmit. */
1121		seg->dto_urb->num_mapped_sgs = 0;
1122
1123		if (xfer->urb->transfer_buffer) {
1124			seg->dto_urb->transfer_buffer =
1125				xfer->urb->transfer_buffer +
1126				buf_itr_offset;
1127			seg->dto_urb->sg = NULL;
1128			seg->dto_urb->num_sgs = 0;
1129		} else {
1130			seg->dto_urb->transfer_buffer = NULL;
1131
1132			/*
1133			 * allocate an SG list to store seg_size bytes
1134			 * and copy the subset of the xfer->urb->sg that
1135			 * matches the buffer subset we are about to
1136			 * read.
1137			 */
1138			seg->dto_urb->sg = wa_xfer_create_subset_sg(
1139				xfer->urb->sg,
1140				buf_itr_offset, buf_itr_size,
1141				&(seg->dto_urb->num_sgs));
1142			if (!(seg->dto_urb->sg))
1143				result = -ENOMEM;
1144		}
1145	}
1146	seg->dto_urb->transfer_buffer_length = buf_itr_size;
1147
1148	return result;
1149}
1150
1151/*
1152 * Allocate the segs array and initialize each of them
1153 *
1154 * The segments are freed by wa_xfer_destroy() when the xfer use count
1155 * drops to zero; however, because each segment is given the same life
1156 * cycle as the USB URB it contains, it is actually freed by
1157 * usb_put_urb() on the contained USB URB (twisted, eh?).
1158 */
1159static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
1160{
1161	int result, cnt, isoc_frame_offset = 0;
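	/*
	 * Each segment is allocated with the trailing xfer_hdr member grown
	 * to hold the full type-specific transfer request header.
	 */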
1162	size_t alloc_size = sizeof(*xfer->seg[0])
1163		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
1164	struct usb_device *usb_dev = xfer->wa->usb_dev;
1165	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
1166	struct wa_seg *seg;
1167	size_t buf_itr, buf_size, buf_itr_size;
1168
1169	result = -ENOMEM;
1170	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
1171	if (xfer->seg == NULL)
1172		goto error_segs_kzalloc;
1173	buf_itr = 0;
1174	buf_size = xfer->urb->transfer_buffer_length;
1175	for (cnt = 0; cnt < xfer->segs; cnt++) {
1176		size_t iso_pkt_descr_size = 0;
1177		int seg_isoc_frame_count = 0, seg_isoc_size = 0;
1178
1179		/*
1180		 * Adjust the size of the segment object to contain space for
1181		 * the isoc packet descriptor buffer.
1182		 */
1183		if (usb_pipeisoc(xfer->urb->pipe)) {
1184			seg_isoc_frame_count =
1185				__wa_seg_calculate_isoc_frame_count(xfer,
1186					isoc_frame_offset, &seg_isoc_size);
1187
1188			iso_pkt_descr_size =
1189				sizeof(struct wa_xfer_packet_info_hwaiso) +
1190				(seg_isoc_frame_count * sizeof(__le16));
1191		}
1192		result = -ENOMEM;
1193		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
1194						GFP_ATOMIC);
1195		if (seg == NULL)
1196			goto error_seg_kmalloc;
1197		wa_seg_init(seg);
1198		seg->xfer = xfer;
1199		seg->index = cnt;
1200		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
1201				  usb_sndbulkpipe(usb_dev,
1202						  dto_epd->bEndpointAddress),
1203				  &seg->xfer_hdr, xfer_hdr_size,
1204				  wa_seg_tr_cb, seg);
1205		buf_itr_size = min(buf_size, xfer->seg_size);
1206
1207		if (usb_pipeisoc(xfer->urb->pipe)) {
1208			seg->isoc_frame_count = seg_isoc_frame_count;
1209			seg->isoc_frame_offset = isoc_frame_offset;
1210			seg->isoc_size = seg_isoc_size;
1211			/* iso packet descriptor. */
1212			seg->isoc_pack_desc_urb =
1213					usb_alloc_urb(0, GFP_ATOMIC);
1214			if (seg->isoc_pack_desc_urb == NULL)
1215				goto error_iso_pack_desc_alloc;
1216			/*
1217			 * The buffer for the isoc packet descriptor starts
1218			 * after the transfer request header in the
1219			 * segment object memory buffer.
1220			 */
1221			usb_fill_bulk_urb(
1222				seg->isoc_pack_desc_urb, usb_dev,
1223				usb_sndbulkpipe(usb_dev,
1224					dto_epd->bEndpointAddress),
1225				(void *)(&seg->xfer_hdr) +
1226					xfer_hdr_size,
1227				iso_pkt_descr_size,
1228				wa_seg_iso_pack_desc_cb, seg);
1229
1230			/* adjust starting frame offset for next seg. */
1231			isoc_frame_offset += seg_isoc_frame_count;
1232		}
1233
1234		if (xfer->is_inbound == 0 && buf_size > 0) {
1235			/* outbound data. */
1236			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
1237			if (seg->dto_urb == NULL)
1238				goto error_dto_alloc;
1239			usb_fill_bulk_urb(
1240				seg->dto_urb, usb_dev,
1241				usb_sndbulkpipe(usb_dev,
1242						dto_epd->bEndpointAddress),
1243				NULL, 0, wa_seg_dto_cb, seg);
1244
1245			if (usb_pipeisoc(xfer->urb->pipe)) {
1246				/*
1247				 * Fill in the xfer buffer information for the
1248				 * first isoc frame.  Subsequent frames in this
1249				 * segment will be filled in and sent from the
1250				 * DTO completion routine, if needed.
1251				 */
1252				__wa_populate_dto_urb_isoc(xfer, seg,
1253					seg->isoc_frame_offset);
1254			} else {
1255				/* fill in the xfer buffer information. */
1256				result = __wa_populate_dto_urb(xfer, seg,
1257							buf_itr, buf_itr_size);
1258				if (result < 0)
1259					goto error_seg_outbound_populate;
1260
1261				buf_itr += buf_itr_size;
1262				buf_size -= buf_itr_size;
1263			}
1264		}
1265		seg->status = WA_SEG_READY;
1266	}
1267	return 0;
1268
1269	/*
1270	 * Free the memory for the current segment which failed to init.
1271	 * Use the fact that cnt is left where it failed.  The remaining
1272	 * segments will be cleaned up by wa_xfer_destroy.
1273	 */
1274error_seg_outbound_populate:
1275	usb_free_urb(xfer->seg[cnt]->dto_urb);
1276error_dto_alloc:
1277	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
1278error_iso_pack_desc_alloc:
1279	kfree(xfer->seg[cnt]);
1280	xfer->seg[cnt] = NULL;
1281error_seg_kmalloc:
1282error_segs_kzalloc:
1283	return result;
1284}
1285
1286/*
1287 * Allocates all the stuff needed to submit a transfer
1288 *
1289 * Breaks the whole data buffer in a list of segments, each one has a
1290 * structure allocated to it and linked in xfer->seg[index]
1291 *
1292 * FIXME: merge setup_segs() and the last part of this function, no
1293 *        need to do two for loops when we could run everything in a
1294 *        single one
1295 */
1296static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
1297{
1298	int result;
1299	struct device *dev = &xfer->wa->usb_iface->dev;
1300	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
1301	size_t xfer_hdr_size, cnt, transfer_size;
1302	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
1303
1304	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
1305	if (result < 0)
1306		goto error_setup_sizes;
1307	xfer_hdr_size = result;
1308	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
1309	if (result < 0) {
1310		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
1311			xfer, xfer->segs, result);
1312		goto error_setup_segs;
1313	}
1314	/* Fill the first header */
1315	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
1316	wa_xfer_id_init(xfer);
1317	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
1318
1319	/* Fill remaining headers */
1320	xfer_hdr = xfer_hdr0;
1321	if (xfer_type == WA_XFER_TYPE_ISO) {
1322		xfer_hdr0->dwTransferLength =
1323			cpu_to_le32(xfer->seg[0]->isoc_size);
1324		for (cnt = 1; cnt < xfer->segs; cnt++) {
1325			struct wa_xfer_packet_info_hwaiso *packet_desc;
1326			struct wa_seg *seg = xfer->seg[cnt];
1327			struct wa_xfer_hwaiso *xfer_iso;
1328
1329			xfer_hdr = &seg->xfer_hdr;
1330			xfer_iso = container_of(xfer_hdr,
1331						struct wa_xfer_hwaiso, hdr);
1332			packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
1333			/*
1334			 * Copy values from the 0th header. Segment specific
1335			 * values are set below.
1336			 */
1337			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1338			xfer_hdr->bTransferSegment = cnt;
1339			xfer_hdr->dwTransferLength =
1340				cpu_to_le32(seg->isoc_size);
1341			xfer_iso->dwNumOfPackets =
1342					cpu_to_le32(seg->isoc_frame_count);
1343			__wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
1344			seg->status = WA_SEG_READY;
1345		}
1346	} else {
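		/*
		 * Non-isoc: every segment carries up to seg_size bytes; the
		 * headers below are copies of the first one with
		 * dwTransferLength adjusted for what remains.
		 */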
1347		transfer_size = urb->transfer_buffer_length;
1348		xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
1349			cpu_to_le32(xfer->seg_size) :
1350			cpu_to_le32(transfer_size);
1351		transfer_size -=  xfer->seg_size;
1352		for (cnt = 1; cnt < xfer->segs; cnt++) {
1353			xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
1354			memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
1355			xfer_hdr->bTransferSegment = cnt;
1356			xfer_hdr->dwTransferLength =
1357				transfer_size > xfer->seg_size ?
1358					cpu_to_le32(xfer->seg_size)
1359					: cpu_to_le32(transfer_size);
1360			xfer->seg[cnt]->status = WA_SEG_READY;
1361			transfer_size -=  xfer->seg_size;
1362		}
1363	}
1364	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
1365	result = 0;
1366error_setup_segs:
1367error_setup_sizes:
1368	return result;
1369}
1370
1371/*
1372 * Submit all the URBs that make up one transfer segment: the transfer
1373 * request, the isoc packet descriptor (if any) and the OUT data (if any).
1374 * rpipe->seg_lock is held!
1375 */
1376static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
1377			   struct wa_seg *seg, int *dto_done)
1378{
1379	int result;
1380
1381	/* default to done unless we encounter a multi-frame isoc segment. */
1382	*dto_done = 1;
1383
1384	/*
1385	 * Take a ref for each segment urb so the xfer cannot disappear until
1386	 * all of the callbacks run.
1387	 */
1388	wa_xfer_get(xfer);
1389	/* submit the transfer request. */
1390	seg->status = WA_SEG_SUBMITTED;
1391	result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
1392	if (result < 0) {
1393		pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
1394		       __func__, xfer, seg->index, result);
1395		wa_xfer_put(xfer);
1396		goto error_tr_submit;
1397	}
1398	/* submit the isoc packet descriptor if present. */
1399	if (seg->isoc_pack_desc_urb) {
1400		wa_xfer_get(xfer);
1401		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
1402		seg->isoc_frame_index = 0;
1403		if (result < 0) {
1404			pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
1405			       __func__, xfer, seg->index, result);
1406			wa_xfer_put(xfer);
1407			goto error_iso_pack_desc_submit;
1408		}
1409	}
1410	/* submit the out data if this is an out request. */
1411	if (seg->dto_urb) {
1412		struct wahc *wa = xfer->wa;
1413		wa_xfer_get(xfer);
1414		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
1415		if (result < 0) {
1416			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
1417			       __func__, xfer, seg->index, result);
1418			wa_xfer_put(xfer);
1419			goto error_dto_submit;
1420		}
1421		/*
1422		 * If this segment contains more than one isoc frame, hold
1423		 * onto the dto resource until we send all frames.
1424		 * Only applies to non-Alereon devices.
1425		 */
1426		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
1427			&& (seg->isoc_frame_count > 1))
1428			*dto_done = 0;
1429	}
1430	rpipe_avail_dec(rpipe);
1431	return 0;
1432
1433error_dto_submit:
1434	usb_unlink_urb(seg->isoc_pack_desc_urb);
1435error_iso_pack_desc_submit:
1436	usb_unlink_urb(&seg->tr_urb);
1437error_tr_submit:
1438	seg->status = WA_SEG_ERROR;
1439	seg->result = result;
1440	*dto_done = 1;
1441	return result;
1442}
1443
1444/*
1445 * Execute more queued request segments until the maximum concurrent allowed.
1446 * Return true if the DTO resource was acquired and released.
1447 *
1448 * The ugly unlock/lock sequence on the error path is needed as the
1449 * xfer->lock normally nests the seg_lock and not vice versa.
1450 */
1451static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
1452{
1453	int result, dto_acquired = 0, dto_done = 0;
1454	struct device *dev = &rpipe->wa->usb_iface->dev;
1455	struct wa_seg *seg;
1456	struct wa_xfer *xfer;
1457	unsigned long flags;
1458
1459	*dto_waiting = 0;
1460
1461	spin_lock_irqsave(&rpipe->seg_lock, flags);
1462	while (atomic_read(&rpipe->segs_available) > 0
1463	      && !list_empty(&rpipe->seg_list)
1464	      && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
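		/*
		 * The DTO resource is only requested once a delayed segment
		 * is actually ready to go; if the try-get fails, dto_acquired
		 * is left at 0 and the loop exits.
		 */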
1465		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
1466				 list_node);
1467		list_del(&seg->list_node);
1468		xfer = seg->xfer;
1469		/*
1470		 * Get a reference to the xfer in case the callbacks for the
1471		 * URBs submitted by __wa_seg_submit attempt to complete
1472		 * the xfer before this function completes.
1473		 */
1474		wa_xfer_get(xfer);
1475		result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
1476		/* release the dto resource if this RPIPE is done with it. */
1477		if (dto_done)
1478			__wa_dto_put(rpipe->wa);
1479		dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
1480			xfer, wa_xfer_id(xfer), seg->index,
1481			atomic_read(&rpipe->segs_available), result);
1482		if (unlikely(result < 0)) {
1483			int done;
1484
1485			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1486			spin_lock_irqsave(&xfer->lock, flags);
1487			__wa_xfer_abort(xfer);
1488			/*
1489			 * This seg was marked as submitted when it was put on
1490			 * the RPIPE seg_list.  Mark it done.
1491			 */
1492			xfer->segs_done++;
1493			done = __wa_xfer_is_done(xfer);
1494			spin_unlock_irqrestore(&xfer->lock, flags);
1495			if (done)
1496				wa_xfer_completion(xfer);
1497			spin_lock_irqsave(&rpipe->seg_lock, flags);
1498		}
1499		wa_xfer_put(xfer);
1500	}
1501	/*
1502	 * Mark this RPIPE as waiting if dto was not acquired, there are
1503	 * delayed segs and no active transfers to wake us up later.
1504	 */
1505	if (!dto_acquired && !list_empty(&rpipe->seg_list)
1506		&& (atomic_read(&rpipe->segs_available) ==
1507			le16_to_cpu(rpipe->descr.wRequests)))
1508		*dto_waiting = 1;
1509
1510	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1511
1512	return dto_done;
1513}
1514
1515static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
1516{
1517	int dto_waiting;
1518	int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
1519
1520	/*
1521	 * If this RPIPE is waiting on the DTO resource, add it to the tail of
1522	 * the waiting list.
1523	 * Otherwise, if the WA DTO resource was acquired and released by
1524	 *  __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
1525	 * DTO and failed during that time.  Check the delayed list and process
1526	 * any waiters, starting from the head of the delayed list.
1527	 */
1528	if (dto_waiting)
1529		wa_add_delayed_rpipe(rpipe->wa, rpipe);
1530	else if (dto_done)
1531		wa_check_for_delayed_rpipes(rpipe->wa);
1532}
1533
1534/*
1535 * Submit the segments of a transfer, delaying those that do not yet fit
1536 * in the RPIPE.  xfer->lock is taken.
1537 *
1538 * If submitting a segment fails, we just stop submitting and return the
1539 * error; wa_urb_enqueue_b() will execute the completion path.
1540 */
1541static int __wa_xfer_submit(struct wa_xfer *xfer)
1542{
1543	int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
1544	struct wahc *wa = xfer->wa;
1545	struct device *dev = &wa->usb_iface->dev;
1546	unsigned cnt;
1547	struct wa_seg *seg;
1548	unsigned long flags;
1549	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
1550	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
1551	u8 available;
1552	u8 empty;
1553
1554	spin_lock_irqsave(&wa->xfer_list_lock, flags);
1555	list_add_tail(&xfer->list_node, &wa->xfer_list);
1556	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1557
1558	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
1559	result = 0;
1560	spin_lock_irqsave(&rpipe->seg_lock, flags);
1561	for (cnt = 0; cnt < xfer->segs; cnt++) {
1562		int delay_seg = 1;
1563
1564		available = atomic_read(&rpipe->segs_available);
1565		empty = list_empty(&rpipe->seg_list);
1566		seg = xfer->seg[cnt];
1567		if (available && empty) {
1568			/*
1569			 * Only attempt to acquire DTO if we have a segment
1570			 * to send.
1571			 */
1572			dto_acquired = __wa_dto_try_get(rpipe->wa);
1573			if (dto_acquired) {
1574				delay_seg = 0;
1575				result = __wa_seg_submit(rpipe, xfer, seg,
1576							&dto_done);
1577				dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
1578					xfer, wa_xfer_id(xfer), cnt, available,
1579					empty);
1580				if (dto_done)
1581					__wa_dto_put(rpipe->wa);
1582
1583				if (result < 0) {
1584					__wa_xfer_abort(xfer);
1585					goto error_seg_submit;
1586				}
1587			}
1588		}
1589
1590		if (delay_seg) {
1591			dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
1592				xfer, wa_xfer_id(xfer), cnt, available,  empty);
1593			seg->status = WA_SEG_DELAYED;
1594			list_add_tail(&seg->list_node, &rpipe->seg_list);
1595		}
1596		xfer->segs_submitted++;
1597	}
1598error_seg_submit:
1599	/*
1600	 * Mark this RPIPE as waiting if dto was not acquired, there are
1601	 * delayed segs and no active transfers to wake us up later.
1602	 */
1603	if (!dto_acquired && !list_empty(&rpipe->seg_list)
1604		&& (atomic_read(&rpipe->segs_available) ==
1605			le16_to_cpu(rpipe->descr.wRequests)))
1606		dto_waiting = 1;
1607	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
1608
1609	if (dto_waiting)
1610		wa_add_delayed_rpipe(rpipe->wa, rpipe);
1611	else if (dto_done)
1612		wa_check_for_delayed_rpipes(rpipe->wa);
1613
1614	return result;
1615}
1616
1617/*
1618 * Second part of a URB/transfer enqueuement
1619 *
1620 * Assumes this comes from wa_urb_enqueue() [maybe through
1621 * wa_urb_enqueue_run()]. At this point:
1622 *
1623 * xfer->wa	filled and refcounted
1624 * xfer->ep	filled with rpipe refcounted if
1625 *              delayed == 0
1626 * xfer->urb 	filled and refcounted (this is the case when called
1627 *              from wa_urb_enqueue() as we come from usb_submit_urb()
1628 *              and when called by wa_urb_enqueue_run(), as we took an
1629 *              extra ref dropped by _run() after we return).
1630 * xfer->gfp	filled
1631 *
1632 * If we fail at __wa_xfer_submit(), then we just check if we are done
1633 * and if so, we run the completion procedure. However, if we are not
1634 * yet done, we do nothing and wait for the completion handlers from
1635 * the submitted URBs or from the xfer-result path to kick in. If xfer
1636 * result never kicks in, the xfer will timeout from the USB code and
1637 * dequeue() will be called.
1638 */
1639static int wa_urb_enqueue_b(struct wa_xfer *xfer)
1640{
1641	int result;
1642	unsigned long flags;
1643	struct urb *urb = xfer->urb;
1644	struct wahc *wa = xfer->wa;
1645	struct wusbhc *wusbhc = wa->wusb;
1646	struct wusb_dev *wusb_dev;
1647	unsigned done;
1648
1649	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
1650	if (result < 0) {
1651		pr_err("%s: error_rpipe_get\n", __func__);
1652		goto error_rpipe_get;
1653	}
1654	result = -ENODEV;
1655	/* FIXME: segmentation broken -- kills DWA */
1656	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
1657	if (urb->dev == NULL) {
1658		mutex_unlock(&wusbhc->mutex);
1659		pr_err("%s: error usb dev gone\n", __func__);
1660		goto error_dev_gone;
1661	}
1662	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
1663	if (wusb_dev == NULL) {
1664		mutex_unlock(&wusbhc->mutex);
1665		dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
1666			__func__);
1667		goto error_dev_gone;
1668	}
1669	mutex_unlock(&wusbhc->mutex);
1670
1671	spin_lock_irqsave(&xfer->lock, flags);
1672	xfer->wusb_dev = wusb_dev;
1673	result = urb->status;
1674	if (urb->status != -EINPROGRESS) {
1675		dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
1676		goto error_dequeued;
1677	}
1678
1679	result = __wa_xfer_setup(xfer, urb);
1680	if (result < 0) {
1681		dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
1682		goto error_xfer_setup;
1683	}
1684	/*
1685	 * Get a xfer reference since __wa_xfer_submit starts asynchronous
1686	 * operations that may try to complete the xfer before this function
1687	 * exits.
1688	 */
1689	wa_xfer_get(xfer);
1690	result = __wa_xfer_submit(xfer);
1691	if (result < 0) {
1692		dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
1693		goto error_xfer_submit;
1694	}
1695	spin_unlock_irqrestore(&xfer->lock, flags);
1696	wa_xfer_put(xfer);
1697	return 0;
1698
1699	/*
1700	 * this is basically wa_xfer_completion() broken up; wa_xfer_giveback()
1701	 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
1702	 * setup().
1703	 */
1704error_xfer_setup:
1705error_dequeued:
1706	spin_unlock_irqrestore(&xfer->lock, flags);
1707	/* FIXME: segmentation broken, kills DWA */
1708	if (wusb_dev)
1709		wusb_dev_put(wusb_dev);
1710error_dev_gone:
1711	rpipe_put(xfer->ep->hcpriv);
1712error_rpipe_get:
1713	xfer->result = result;
1714	return result;
1715
1716error_xfer_submit:
1717	done = __wa_xfer_is_done(xfer);
1718	xfer->result = result;
1719	spin_unlock_irqrestore(&xfer->lock, flags);
1720	if (done)
1721		wa_xfer_completion(xfer);
1722	wa_xfer_put(xfer);
1723	/* return success since the completion routine will run. */
1724	return 0;
1725}
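
/*
 * Illustrative sketch (guarded out, not part of this driver): the
 * extra-reference idiom used by wa_urb_enqueue_b() above, reduced to a
 * generic refcounted object.  All demo_* names are hypothetical; the point
 * is only that the caller pins the object before an asynchronous submit
 * whose completion path may drop the last reference, and unpins afterwards.
 */
#if 0
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_xfer {
	struct kref refcount;
	/* ... transfer state ... */
};

static int demo_submit_async(struct demo_xfer *xfer);	/* hypothetical */

static void demo_xfer_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_xfer, refcount));
}

static int demo_enqueue(struct demo_xfer *xfer)
{
	int result;

	kref_get(&xfer->refcount);	  /* pin across the async submit */
	result = demo_submit_async(xfer); /* may complete before we return */
	kref_put(&xfer->refcount, demo_xfer_release);
	return result;
}
#endif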
1726
1727/*
1728 * Execute the delayed transfers in the Wire Adapter @wa
1729 *
1730 * We need to be careful here, as dequeue() could be called in the
1731 * middle.  That's why we do the whole thing under the
1732 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
1733 * and then checks the list -- since that would acquire the locks in
1734 * inverse order, we move the delayed list to a private list while locked
1735 * and then submit the entries without the list lock held.
1736 */
1737void wa_urb_enqueue_run(struct work_struct *ws)
1738{
1739	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
1740	struct wa_xfer *xfer, *next;
1741	struct urb *urb;
1742	LIST_HEAD(tmp_list);
1743
1744	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
1745	spin_lock_irq(&wa->xfer_list_lock);
1746	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
1747			wa->xfer_delayed_list.prev);
1748	spin_unlock_irq(&wa->xfer_list_lock);
1749
1750	/*
1751	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
1752	 * can take xfer->lock as well as lock mutexes.
1753	 */
1754	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1755		list_del_init(&xfer->list_node);
1756
1757		urb = xfer->urb;
1758		if (wa_urb_enqueue_b(xfer) < 0)
1759			wa_xfer_giveback(xfer);
1760		usb_put_urb(urb);	/* taken when queuing */
1761	}
1762}
1763EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
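
/*
 * Illustrative sketch (guarded out, not part of this driver): the
 * splice-under-lock pattern used by wa_urb_enqueue_run() above.  The shared
 * list is cut onto a private list while the lock is held, then each entry
 * is processed with no list lock held so the per-entry work may sleep or
 * take other locks without inverting lock order.  All demo_* names are
 * hypothetical.
 */
#if 0
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head node;
};

static LIST_HEAD(demo_delayed);			/* hypothetical shared list */
static DEFINE_SPINLOCK(demo_lock);

static void demo_process(struct demo_item *item);	/* may sleep */

static void demo_run_delayed(void)
{
	struct demo_item *item, *next;
	LIST_HEAD(tmp);

	/* cut the whole shared list onto a private one while locked */
	spin_lock_irq(&demo_lock);
	list_cut_position(&tmp, &demo_delayed, demo_delayed.prev);
	spin_unlock_irq(&demo_lock);

	/* walk the private list with no list lock held */
	list_for_each_entry_safe(item, next, &tmp, node) {
		list_del_init(&item->node);
		demo_process(item);
	}
}
#endif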
1764
1765/*
1766 * Process the errored transfers on the Wire Adapter outside of interrupt.
1767 */
1768void wa_process_errored_transfers_run(struct work_struct *ws)
1769{
1770	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
1771	struct wa_xfer *xfer, *next;
1772	LIST_HEAD(tmp_list);
1773
1774	pr_info("%s: Run delayed STALL processing.\n", __func__);
1775
1776	/* Create a copy of the wa->xfer_errored_list while holding the lock */
1777	spin_lock_irq(&wa->xfer_list_lock);
1778	list_cut_position(&tmp_list, &wa->xfer_errored_list,
1779			wa->xfer_errored_list.prev);
1780	spin_unlock_irq(&wa->xfer_list_lock);
1781
1782	/*
1783	 * run rpipe_clear_feature_stalled from temp list without list lock
1784	 * held.
1785	 */
1786	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
1787		struct usb_host_endpoint *ep;
1788		unsigned long flags;
1789		struct wa_rpipe *rpipe;
1790
1791		spin_lock_irqsave(&xfer->lock, flags);
1792		ep = xfer->ep;
1793		rpipe = ep->hcpriv;
1794		spin_unlock_irqrestore(&xfer->lock, flags);
1795
1796		/* clear RPIPE feature stalled without holding a lock. */
1797		rpipe_clear_feature_stalled(wa, ep);
1798
1799		/* complete the xfer. This removes it from the tmp list. */
1800		wa_xfer_completion(xfer);
1801
1802		/* check for work. */
1803		wa_xfer_delayed_run(rpipe);
1804	}
1805}
1806EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
1807
1808/*
1809 * Submit a transfer to the Wire Adapter in a delayed way
1810 *
1811 * The process of enqueuing may sleep [see wa_urb_enqueue_b(), for the
1812 * rpipe_get() and the mutex_lock()]. If we are in an atomic section, we
1813 * defer the enqueue_b() call to a workqueue; otherwise we call it directly.
1814 *
1815 * @urb: We own a reference to it done by the HCI Linux USB stack that
1816 *       will be given up by calling usb_hcd_giveback_urb() or by
1817 *       returning error from this function -> ergo we don't have to
1818 *       refcount it.
1819 */
1820int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1821		   struct urb *urb, gfp_t gfp)
1822{
1823	int result;
1824	struct device *dev = &wa->usb_iface->dev;
1825	struct wa_xfer *xfer;
1826	unsigned long my_flags;
1827	unsigned cant_sleep = irqs_disabled() | in_atomic();
1828
1829	if ((urb->transfer_buffer == NULL)
1830	    && (urb->sg == NULL)
1831	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1832	    && urb->transfer_buffer_length != 0) {
1833		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1834		dump_stack();
1835	}
1836
1837	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1838	result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
1839	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1840	if (result < 0)
1841		goto error_link_urb;
1842
1843	result = -ENOMEM;
1844	xfer = kzalloc(sizeof(*xfer), gfp);
1845	if (xfer == NULL)
1846		goto error_kmalloc;
1847
1848	result = -ENOENT;
1849	if (urb->status != -EINPROGRESS)	/* cancelled */
1850		goto error_dequeued;		/* before starting? */
1851	wa_xfer_init(xfer);
1852	xfer->wa = wa_get(wa);
1853	xfer->urb = urb;
1854	xfer->gfp = gfp;
1855	xfer->ep = ep;
1856	urb->hcpriv = xfer;
1857
1858	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1859		xfer, urb, urb->pipe, urb->transfer_buffer_length,
1860		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1861		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1862		cant_sleep ? "deferred" : "inline");
1863
1864	if (cant_sleep) {
1865		usb_get_urb(urb);
1866		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1867		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1868		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1869		queue_work(wusbd, &wa->xfer_enqueue_work);
1870	} else {
1871		result = wa_urb_enqueue_b(xfer);
1872		if (result < 0) {
1873			/*
1874			 * URB submit/enqueue failed.  Clean up, return an
1875			 * error and do not run the callback.  This avoids
1876			 * an infinite submit/complete loop.
1877			 */
1878			dev_err(dev, "%s: URB enqueue failed: %d\n",
1879			   __func__, result);
1880			wa_put(xfer->wa);
1881			wa_xfer_put(xfer);
1882			spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1883			usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1884			spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1885			return result;
1886		}
1887	}
1888	return 0;
1889
1890error_dequeued:
1891	kfree(xfer);
1892error_kmalloc:
1893	spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1894	usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
1895	spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1896error_link_urb:
1897	return result;
1898}
1899EXPORT_SYMBOL_GPL(wa_urb_enqueue);
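
/*
 * Illustrative sketch (guarded out, not part of this driver): the
 * inline-vs-deferred decision made by wa_urb_enqueue() above.  When the
 * caller may be in atomic context the sleeping part is pushed to a work
 * item; in process context it runs directly.  All demo_* names are
 * hypothetical.
 */
#if 0
#include <linux/workqueue.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>

static int demo_enqueue_sleeping(void);				/* hypothetical */
static void demo_enqueue_work_fn(struct work_struct *ws);	/* hypothetical */
static DECLARE_WORK(demo_enqueue_work, demo_enqueue_work_fn);

static int demo_enqueue(void)
{
	if (irqs_disabled() || in_atomic()) {
		/* cannot sleep here: defer the sleeping part to a work item */
		schedule_work(&demo_enqueue_work);
		return 0;
	}
	/* process context: run the sleeping part inline */
	return demo_enqueue_sleeping();
}
#endif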
1900
1901/*
1902 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
1903 * handler] is called.
1904 *
1905 * Until a transfer goes successfully through wa_urb_enqueue(),
1906 * dequeueing it must also run the completion; while it sits on the
1907 * delayed list or before wa_xfer_setup() is called, we complete it here.
1908 *
1909 *  not setup  If there is no hcpriv yet, it means that enqueue()
1910 *             has not had time to set the xfer up yet. Because
1911 *             urb->status should be other than -EINPROGRESS,
1912 *             enqueue() will catch that and bail out.
1913 *
1914 * If the transfer has gone through setup, we just need to clean it
1915 * up. If it has gone through submit(), we have to abort it [with an
1916 * asynch request] and then make sure we cancel each segment.
1917 *
1918 */
1919int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
1920{
1921	unsigned long flags, flags2;
1922	struct wa_xfer *xfer;
1923	struct wa_seg *seg;
1924	struct wa_rpipe *rpipe;
1925	unsigned cnt, done = 0, xfer_abort_pending;
1926	unsigned rpipe_ready = 0;
1927	int result;
1928
1929	/* check if it is safe to unlink. */
1930	spin_lock_irqsave(&wa->xfer_list_lock, flags);
1931	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
1932	if ((result == 0) && urb->hcpriv) {
1933		/*
1934		 * Get a xfer ref to prevent a race with wa_xfer_giveback
1935		 * cleaning up the xfer while we are working with it.
1936		 */
1937		wa_xfer_get(urb->hcpriv);
1938	}
1939	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
1940	if (result)
1941		return result;
1942
1943	xfer = urb->hcpriv;
1944	if (xfer == NULL)
1945		return -ENOENT;
1946	spin_lock_irqsave(&xfer->lock, flags);
1947	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
1948	rpipe = xfer->ep->hcpriv;
1949	if (rpipe == NULL) {
1950		pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s",
1951			__func__, xfer, wa_xfer_id(xfer),
1952			"Probably already aborted.\n");
1953		result = -ENOENT;
1954		goto out_unlock;
1955	}
1956	/*
1957	 * Check for done to avoid racing with wa_xfer_giveback and completing
1958	 * twice.
1959	 */
1960	if (__wa_xfer_is_done(xfer)) {
1961		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
1962			xfer, wa_xfer_id(xfer));
1963		result = -ENOENT;
1964		goto out_unlock;
1965	}
1966	/* Check the delayed list -> if there, release and complete */
1967	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1968	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1969		goto dequeue_delayed;
1970	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1971	if (xfer->seg == NULL)  	/* still hasn't reached */
1972		goto out_unlock;	/* setup(), enqueue_b() completes */
1973	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
1974	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
1975	/*
1976	 * grab the rpipe->seg_lock here to prevent racing with
1977	 * __wa_xfer_delayed_run.
1978	 */
1979	spin_lock(&rpipe->seg_lock);
1980	for (cnt = 0; cnt < xfer->segs; cnt++) {
1981		seg = xfer->seg[cnt];
1982		pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
1983			__func__, wa_xfer_id(xfer), cnt, seg->status);
1984		switch (seg->status) {
1985		case WA_SEG_NOTREADY:
1986		case WA_SEG_READY:
1987			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1988			       xfer, cnt, seg->status);
1989			WARN_ON(1);
1990			break;
1991		case WA_SEG_DELAYED:
1992			/*
1993			 * delete from rpipe delayed list.  If no segments on
1994			 * this xfer have been submitted, __wa_xfer_is_done will
1995			 * trigger a giveback below.  Otherwise, the submitted
1996			 * segments will be completed in the DTI interrupt.
1997			 */
1998			seg->status = WA_SEG_ABORTED;
1999			seg->result = -ENOENT;
2000			list_del(&seg->list_node);
2001			xfer->segs_done++;
2002			break;
2003		case WA_SEG_DONE:
2004		case WA_SEG_ERROR:
2005		case WA_SEG_ABORTED:
2006			break;
2007			/*
2008			 * The buf_in data for a segment in the
2009			 * WA_SEG_DTI_PENDING state is actively being read.
2010			 * Let wa_buf_in_cb handle it since it will be called
2011			 * and will increment xfer->segs_done.  Cleaning up
2012			 * here could cause wa_buf_in_cb to access the xfer
2013			 * after it has been completed/freed.
2014			 */
2015		case WA_SEG_DTI_PENDING:
2016			break;
2017			/*
2018			 * In the states below, the HWA device already knows
2019			 * about the transfer.  If an abort request was sent,
2020			 * allow the HWA to process it and wait for the
2021			 * results.  Otherwise, the DTI state and seg completed
2022			 * counts can get out of sync.
2023			 */
2024		case WA_SEG_SUBMITTED:
2025		case WA_SEG_PENDING:
2026			/*
2027			 * Check if the abort was successfully sent.  This could
2028			 * be false if the HWA has been removed but we haven't
2029			 * gotten the disconnect notification yet.
2030			 */
2031			if (!xfer_abort_pending) {
2032				seg->status = WA_SEG_ABORTED;
2033				rpipe_ready = rpipe_avail_inc(rpipe);
2034				xfer->segs_done++;
2035			}
2036			break;
2037		}
2038	}
2039	spin_unlock(&rpipe->seg_lock);
2040	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
2041	done = __wa_xfer_is_done(xfer);
2042	spin_unlock_irqrestore(&xfer->lock, flags);
2043	if (done)
2044		wa_xfer_completion(xfer);
2045	if (rpipe_ready)
2046		wa_xfer_delayed_run(rpipe);
2047	wa_xfer_put(xfer);
2048	return result;
2049
2050out_unlock:
2051	spin_unlock_irqrestore(&xfer->lock, flags);
2052	wa_xfer_put(xfer);
2053	return result;
2054
2055dequeue_delayed:
2056	list_del_init(&xfer->list_node);
2057	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
2058	xfer->result = urb->status;
2059	spin_unlock_irqrestore(&xfer->lock, flags);
2060	wa_xfer_giveback(xfer);
2061	wa_xfer_put(xfer);
2062	usb_put_urb(urb);		/* we got a ref in enqueue() */
2063	return 0;
2064}
2065EXPORT_SYMBOL_GPL(wa_urb_dequeue);
2066
2067/*
2068 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
2069 * codes
2070 *
2071 * Positive errno values are internal inconsistencies and should be
2072 * flagged louder. Negative are to be passed up to the user in the
2073 * normal way.
2074 *
2075 * @status: USB WA status code -- high two bits are stripped.
2076 */
2077static int wa_xfer_status_to_errno(u8 status)
2078{
2079	int errno;
2080	u8 real_status = status;
2081	static int xlat[] = {
2082		[WA_XFER_STATUS_SUCCESS] = 		0,
2083		[WA_XFER_STATUS_HALTED] = 		-EPIPE,
2084		[WA_XFER_STATUS_DATA_BUFFER_ERROR] = 	-ENOBUFS,
2085		[WA_XFER_STATUS_BABBLE] = 		-EOVERFLOW,
2086		[WA_XFER_RESERVED] = 			EINVAL,
2087		[WA_XFER_STATUS_NOT_FOUND] =		0,
2088		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
2089		[WA_XFER_STATUS_TRANSACTION_ERROR] = 	-EILSEQ,
2090		[WA_XFER_STATUS_ABORTED] =		-ENOENT,
2091		[WA_XFER_STATUS_RPIPE_NOT_READY] = 	EINVAL,
2092		[WA_XFER_INVALID_FORMAT] = 		EINVAL,
2093		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = 	EINVAL,
2094		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = 	EINVAL,
2095	};
2096	status &= 0x3f;
2097
2098	if (status == 0)
2099		return 0;
2100	if (status >= ARRAY_SIZE(xlat)) {
2101		printk_ratelimited(KERN_ERR "%s(): BUG? "
2102			       "Unknown WA transfer status 0x%02x\n",
2103			       __func__, real_status);
2104		return -EINVAL;
2105	}
2106	errno = xlat[status];
2107	if (unlikely(errno > 0)) {
2108		printk_ratelimited(KERN_ERR "%s(): BUG? "
2109			       "Inconsistent WA status: 0x%02x\n",
2110			       __func__, real_status);
2111		errno = -errno;
2112	}
2113	return errno;
2114}
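
/*
 * Illustrative sketch (guarded out, not part of this driver): how a caller
 * can rely on the translation above.  demo_handle_status() is hypothetical;
 * the only point is that positive table entries flag states the host should
 * never see, and wa_xfer_status_to_errno() has already logged and negated
 * them, so callers only ever see 0 or a negative errno.
 */
#if 0
static void demo_handle_status(u8 status)
{
	int err = wa_xfer_status_to_errno(status);

	if (err == 0)
		return;			/* segment completed fine */
	pr_debug("segment failed: %d\n", err);
}
#endif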
2115
2116/*
2117 * If a last segment flag and/or a transfer result error is encountered,
2118 * no other segment transfer results will be returned from the device.
2119 * Mark the remaining submitted or pending xfers as completed so that
2120 * the xfer will complete cleanly.
2121 *
2122 * xfer->lock must be held
2123 *
2124 */
2125static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
2126		int starting_index, enum wa_seg_status status)
2127{
2128	int index;
2129	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
2130
2131	for (index = starting_index; index < xfer->segs_submitted; index++) {
2132		struct wa_seg *current_seg = xfer->seg[index];
2133
2134		BUG_ON(current_seg == NULL);
2135
2136		switch (current_seg->status) {
2137		case WA_SEG_SUBMITTED:
2138		case WA_SEG_PENDING:
2139		case WA_SEG_DTI_PENDING:
2140			rpipe_avail_inc(rpipe);
2141		/*
2142		 * do not increment RPIPE avail for the WA_SEG_DELAYED case
2143		 * since it has not been submitted to the RPIPE.
2144		 */
2145		/* fall through */
2146		case WA_SEG_DELAYED:
2147			xfer->segs_done++;
2148			current_seg->status = status;
2149			break;
2150		case WA_SEG_ABORTED:
2151			break;
2152		default:
2153			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
2154				__func__, wa_xfer_id(xfer), index,
2155				current_seg->status);
2156			break;
2157		}
2158	}
2159}
2160
2161/* Populate the given urb based on the current isoc transfer state. */
2162static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
2163	struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
2164{
2165	int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
2166	int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
2167	struct usb_iso_packet_descriptor *iso_frame_desc =
2168						xfer->urb->iso_frame_desc;
2169	const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
2170	int next_frame_contiguous;
2171	struct usb_iso_packet_descriptor *iso_frame;
2172
2173	BUG_ON(buf_in_urb->status == -EINPROGRESS);
2174
2175	/*
2176	 * If the current frame actual_length is contiguous with the next frame
2177	 * and actual_length is a multiple of the DTI endpoint max packet size,
2178	 * combine the current frame with the next frame in a single URB.  This
2179	 * reduces the number of URBs that must be submitted in that case.
2180	 */
2181	seg_index = seg->isoc_frame_index;
2182	do {
2183		next_frame_contiguous = 0;
2184
2185		iso_frame = &iso_frame_desc[urb_frame_index];
2186		total_len += iso_frame->actual_length;
2187		++urb_frame_index;
2188		++seg_index;
2189
2190		if (seg_index < seg->isoc_frame_count) {
2191			struct usb_iso_packet_descriptor *next_iso_frame;
2192
2193			next_iso_frame = &iso_frame_desc[urb_frame_index];
2194
2195			if ((iso_frame->offset + iso_frame->actual_length) ==
2196				next_iso_frame->offset)
2197				next_frame_contiguous = 1;
2198		}
2199	} while (next_frame_contiguous
2200			&& ((iso_frame->actual_length % dti_packet_size) == 0));
2201
2202	/* this should always be 0 before a resubmit. */
2203	buf_in_urb->num_mapped_sgs	= 0;
2204	buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
2205		iso_frame_desc[urb_start_frame].offset;
2206	buf_in_urb->transfer_buffer_length = total_len;
2207	buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2208	buf_in_urb->transfer_buffer = NULL;
2209	buf_in_urb->sg = NULL;
2210	buf_in_urb->num_sgs = 0;
2211	buf_in_urb->context = seg;
2212
2213	/* return the number of frames included in this URB. */
2214	return seg_index - seg->isoc_frame_index;
2215}
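
/*
 * Illustrative sketch (guarded out, not part of this driver): the frame
 * coalescing rule applied by __wa_populate_buf_in_urb_isoc() above.  Two
 * isoc frames may be read with one URB when the first frame's data ends
 * exactly where the next frame's data starts and the first frame's length
 * is a whole number of DTI max-packet units, so no short packet can arrive
 * in the middle of the combined read.  demo_frames_coalesce() is a
 * hypothetical helper.
 */
#if 0
#include <linux/types.h>
#include <linux/usb.h>

static bool demo_frames_coalesce(const struct usb_iso_packet_descriptor *cur,
				 const struct usb_iso_packet_descriptor *next,
				 int dti_packet_size)
{
	return (cur->offset + cur->actual_length == next->offset) &&
		(cur->actual_length % dti_packet_size == 0);
}
#endif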
2216
2217/* Populate the given urb based on the current transfer state. */
2218static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
2219	unsigned int seg_idx, unsigned int bytes_transferred)
2220{
2221	int result = 0;
2222	struct wa_seg *seg = xfer->seg[seg_idx];
2223
2224	BUG_ON(buf_in_urb->status == -EINPROGRESS);
2225	/* this should always be 0 before a resubmit. */
2226	buf_in_urb->num_mapped_sgs	= 0;
2227
2228	if (xfer->is_dma) {
2229		buf_in_urb->transfer_dma = xfer->urb->transfer_dma
2230			+ (seg_idx * xfer->seg_size);
2231		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
2232		buf_in_urb->transfer_buffer = NULL;
2233		buf_in_urb->sg = NULL;
2234		buf_in_urb->num_sgs = 0;
2235	} else {
2236		/* do buffer or SG processing. */
2237		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
2238
2239		if (xfer->urb->transfer_buffer) {
2240			buf_in_urb->transfer_buffer =
2241				xfer->urb->transfer_buffer
2242				+ (seg_idx * xfer->seg_size);
2243			buf_in_urb->sg = NULL;
2244			buf_in_urb->num_sgs = 0;
2245		} else {
2246			/* allocate an SG list to store seg_size bytes
2247				and copy the subset of the xfer->urb->sg
2248				that matches the buffer subset we are
2249				about to read. */
2250			buf_in_urb->sg = wa_xfer_create_subset_sg(
2251				xfer->urb->sg,
2252				seg_idx * xfer->seg_size,
2253				bytes_transferred,
2254				&(buf_in_urb->num_sgs));
2255
2256			if (!(buf_in_urb->sg)) {
2257				buf_in_urb->num_sgs	= 0;
2258				result = -ENOMEM;
2259			}
2260			buf_in_urb->transfer_buffer = NULL;
2261		}
2262	}
2263	buf_in_urb->transfer_buffer_length = bytes_transferred;
2264	buf_in_urb->context = seg;
2265
2266	return result;
2267}
2268
2269/*
2270 * Process a xfer result completion message
2271 *
2272 * inbound transfers: need to schedule a buf_in_urb read
2273 *
2274 * FIXME: this function needs to be broken up in parts
2275 */
2276static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
2277		struct wa_xfer_result *xfer_result)
2278{
2279	int result;
2280	struct device *dev = &wa->usb_iface->dev;
2281	unsigned long flags;
2282	unsigned int seg_idx;
2283	struct wa_seg *seg;
2284	struct wa_rpipe *rpipe;
2285	unsigned done = 0;
2286	u8 usb_status;
2287	unsigned rpipe_ready = 0;
2288	unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
2289	struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
2290
2291	spin_lock_irqsave(&xfer->lock, flags);
2292	seg_idx = xfer_result->bTransferSegment & 0x7f;
2293	if (unlikely(seg_idx >= xfer->segs))
2294		goto error_bad_seg;
2295	seg = xfer->seg[seg_idx];
2296	rpipe = xfer->ep->hcpriv;
2297	usb_status = xfer_result->bTransferStatus;
2298	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
2299		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
2300	if (seg->status == WA_SEG_ABORTED
2301	    || seg->status == WA_SEG_ERROR)	/* already handled */
2302		goto segment_aborted;
2303	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
2304		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
2305	if (seg->status != WA_SEG_PENDING) {
2306		if (printk_ratelimit())
2307			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
2308				xfer, seg_idx, seg->status);
2309		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
2310	}
2311	if (usb_status & 0x80) {
2312		seg->result = wa_xfer_status_to_errno(usb_status);
2313		dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
2314			xfer, xfer->id, seg->index, usb_status);
2315		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
2316			WA_SEG_ABORTED : WA_SEG_ERROR;
2317		goto error_complete;
2318	}
2319	/* FIXME: we ignore warnings, tally them for stats */
2320	if (usb_status & 0x40) 		/* Warning?... */
2321		usb_status = 0;		/* ... pass */
2322	/*
2323	 * If the last segment bit is set, complete the remaining segments.
2324	 * When the current segment is completed, either in wa_buf_in_cb for
2325	 * transfers with data or below for no data, the xfer will complete.
2326	 */
2327	if (xfer_result->bTransferSegment & 0x80)
2328		wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
2329			WA_SEG_DONE);
2330	if (usb_pipeisoc(xfer->urb->pipe)
2331		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
2332		/* set up WA state to read the isoc packet status next. */
2333		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
2334		wa->dti_isoc_xfer_seg = seg_idx;
2335		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
2336	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
2337			&& (bytes_transferred > 0)) {
2338		/* IN data phase: read to buffer */
2339		seg->status = WA_SEG_DTI_PENDING;
2340		result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
2341			bytes_transferred);
2342		if (result < 0)
2343			goto error_buf_in_populate;
2344		++(wa->active_buf_in_urbs);
2345		result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2346		if (result < 0) {
2347			--(wa->active_buf_in_urbs);
2348			goto error_submit_buf_in;
2349		}
2350	} else {
2351		/* OUT data phase or no data, complete it -- */
2352		seg->result = bytes_transferred;
2353		rpipe_ready = rpipe_avail_inc(rpipe);
2354		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2355	}
2356	spin_unlock_irqrestore(&xfer->lock, flags);
2357	if (done)
2358		wa_xfer_completion(xfer);
2359	if (rpipe_ready)
2360		wa_xfer_delayed_run(rpipe);
2361	return;
2362
2363error_submit_buf_in:
2364	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2365		dev_err(dev, "DTI: URB max acceptable errors "
2366			"exceeded, resetting device\n");
2367		wa_reset_all(wa);
2368	}
2369	if (printk_ratelimit())
2370		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
2371			xfer, seg_idx, result);
2372	seg->result = result;
2373	kfree(buf_in_urb->sg);
2374	buf_in_urb->sg = NULL;
2375error_buf_in_populate:
2376	__wa_xfer_abort(xfer);
2377	seg->status = WA_SEG_ERROR;
2378error_complete:
2379	xfer->segs_done++;
2380	rpipe_ready = rpipe_avail_inc(rpipe);
2381	wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
2382	done = __wa_xfer_is_done(xfer);
2383	/*
2384	 * queue work item to clear STALL for control endpoints.
2385	 * Otherwise, let endpoint_reset take care of it.
2386	 */
2387	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
2388		usb_endpoint_xfer_control(&xfer->ep->desc) &&
2389		done) {
2390
2391		dev_info(dev, "Control EP stall.  Queue delayed work.\n");
2392		spin_lock(&wa->xfer_list_lock);
2393		/* move xfer from xfer_list to xfer_errored_list. */
2394		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2395		spin_unlock(&wa->xfer_list_lock);
2396		spin_unlock_irqrestore(&xfer->lock, flags);
2397		queue_work(wusbd, &wa->xfer_error_work);
2398	} else {
2399		spin_unlock_irqrestore(&xfer->lock, flags);
2400		if (done)
2401			wa_xfer_completion(xfer);
2402		if (rpipe_ready)
2403			wa_xfer_delayed_run(rpipe);
2404	}
2405
2406	return;
2407
2408error_bad_seg:
2409	spin_unlock_irqrestore(&xfer->lock, flags);
2410	wa_urb_dequeue(wa, xfer->urb, -ENOENT);
2411	if (printk_ratelimit())
2412		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
2413	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
2414		dev_err(dev, "DTI: URB max acceptable errors "
2415			"exceeded, resetting device\n");
2416		wa_reset_all(wa);
2417	}
2418	return;
2419
2420segment_aborted:
2421	/* nothing to do, as the aborter did the completion */
2422	spin_unlock_irqrestore(&xfer->lock, flags);
2423}
2424
2425/*
2426 * Process an isochronous packet status message
2427 *
2428 * inbound transfers: need to schedule a buf_in_urb read
2429 */
2430static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
2431{
2432	struct device *dev = &wa->usb_iface->dev;
2433	struct wa_xfer_packet_status_hwaiso *packet_status;
2434	struct wa_xfer_packet_status_len_hwaiso *status_array;
2435	struct wa_xfer *xfer;
2436	unsigned long flags;
2437	struct wa_seg *seg;
2438	struct wa_rpipe *rpipe;
2439	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
2440	unsigned first_frame_index = 0, rpipe_ready = 0;
2441	int expected_size;
2442
2443	/* We have a xfer result buffer; check it */
2444	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
2445		urb->actual_length, urb->transfer_buffer);
2446	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
2447	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
2448		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
2449			packet_status->bPacketType);
2450		goto error_parse_buffer;
2451	}
2452	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
2453	if (xfer == NULL) {
2454		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
2455			wa->dti_isoc_xfer_in_progress);
2456		goto error_parse_buffer;
2457	}
2458	spin_lock_irqsave(&xfer->lock, flags);
2459	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
2460		goto error_bad_seg;
2461	seg = xfer->seg[wa->dti_isoc_xfer_seg];
2462	rpipe = xfer->ep->hcpriv;
2463	expected_size = sizeof(*packet_status) +
2464			(sizeof(packet_status->PacketStatus[0]) *
2465			seg->isoc_frame_count);
2466	if (urb->actual_length != expected_size) {
2467		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n",
2468			urb->actual_length, expected_size);
2469		goto error_bad_seg;
2470	}
2471	if (le16_to_cpu(packet_status->wLength) != expected_size) {
2472		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
2473			le16_to_cpu(packet_status->wLength));
2474		goto error_bad_seg;
2475	}
2476	/* write isoc packet status and lengths back to the xfer urb. */
2477	status_array = packet_status->PacketStatus;
2478	xfer->urb->start_frame =
2479		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
2480	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
2481		struct usb_iso_packet_descriptor *iso_frame_desc =
2482			xfer->urb->iso_frame_desc;
2483		const int xfer_frame_index =
2484			seg->isoc_frame_offset + seg_index;
2485
2486		iso_frame_desc[xfer_frame_index].status =
2487			wa_xfer_status_to_errno(
2488			le16_to_cpu(status_array[seg_index].PacketStatus));
2489		iso_frame_desc[xfer_frame_index].actual_length =
2490			le16_to_cpu(status_array[seg_index].PacketLength);
2491		/* track the number of frames successfully transferred. */
2492		if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
2493			/* save the starting frame index for buf_in_urb. */
2494			if (!data_frame_count)
2495				first_frame_index = seg_index;
2496			++data_frame_count;
2497		}
2498	}
2499
2500	if (xfer->is_inbound && data_frame_count) {
2501		int result, total_frames_read = 0, urb_index = 0;
2502		struct urb *buf_in_urb;
2503
2504		/* IN data phase: read to buffer */
2505		seg->status = WA_SEG_DTI_PENDING;
2506
2507		/* start with the first frame with data. */
2508		seg->isoc_frame_index = first_frame_index;
2509		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */
2510		do {
2511			int urb_frame_index, urb_frame_count;
2512			struct usb_iso_packet_descriptor *iso_frame_desc;
2513
2514			buf_in_urb = &(wa->buf_in_urbs[urb_index]);
2515			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
2516				buf_in_urb, xfer, seg);
2517			/* advance frame index to start of next read URB. */
2518			seg->isoc_frame_index += urb_frame_count;
2519			total_frames_read += urb_frame_count;
2520
2521			++(wa->active_buf_in_urbs);
2522			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
2523
2524			/* skip 0-byte frames. */
2525			urb_frame_index =
2526				seg->isoc_frame_offset + seg->isoc_frame_index;
2527			iso_frame_desc =
2528				&(xfer->urb->iso_frame_desc[urb_frame_index]);
2529			while ((seg->isoc_frame_index <
2530						seg->isoc_frame_count) &&
2531				 (iso_frame_desc->actual_length == 0)) {
2532				++(seg->isoc_frame_index);
2533				++iso_frame_desc;
2534			}
2535			++urb_index;
2536
2537		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
2538				&& (seg->isoc_frame_index <
2539						seg->isoc_frame_count));
2540
2541		if (result < 0) {
2542			--(wa->active_buf_in_urbs);
2543			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2544				result);
2545			wa_reset_all(wa);
2546		} else if (data_frame_count > total_frames_read)
2547			/* If we need to read more frames, set DTI busy. */
2548			dti_busy = 1;
2549	} else {
2550		/* OUT transfer or no more IN data, complete it -- */
2551		rpipe_ready = rpipe_avail_inc(rpipe);
2552		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
2553	}
2554	spin_unlock_irqrestore(&xfer->lock, flags);
2555	if (dti_busy)
2556		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
2557	else
2558		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2559	if (done)
2560		wa_xfer_completion(xfer);
2561	if (rpipe_ready)
2562		wa_xfer_delayed_run(rpipe);
2563	wa_xfer_put(xfer);
2564	return dti_busy;
2565
2566error_bad_seg:
2567	spin_unlock_irqrestore(&xfer->lock, flags);
2568	wa_xfer_put(xfer);
2569error_parse_buffer:
2570	return dti_busy;
2571}
2572
2573/*
2574 * Callback for the IN data phase
2575 *
2576 * If successful transition state; otherwise, take a note of the
2577 * error, mark this segment done and try completion.
2578 *
2579 * Note we don't access until we are sure that the transfer hasn't
2580 * been cancelled (ECONNRESET, ENOENT), which could mean that
2581 * seg->xfer could be already gone.
2582 */
2583static void wa_buf_in_cb(struct urb *urb)
2584{
2585	struct wa_seg *seg = urb->context;
2586	struct wa_xfer *xfer = seg->xfer;
2587	struct wahc *wa;
2588	struct device *dev;
2589	struct wa_rpipe *rpipe;
2590	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
2591	unsigned long flags;
2592	int resubmit_dti = 0, active_buf_in_urbs;
2593	u8 done = 0;
2594
2595	/* free the sg if it was used. */
2596	kfree(urb->sg);
2597	urb->sg = NULL;
2598
2599	spin_lock_irqsave(&xfer->lock, flags);
2600	wa = xfer->wa;
2601	dev = &wa->usb_iface->dev;
2602	--(wa->active_buf_in_urbs);
2603	active_buf_in_urbs = wa->active_buf_in_urbs;
2604	rpipe = xfer->ep->hcpriv;
2605
2606	if (usb_pipeisoc(xfer->urb->pipe)) {
2607		struct usb_iso_packet_descriptor *iso_frame_desc =
2608			xfer->urb->iso_frame_desc;
2609		int	seg_index;
2610
2611		/*
2612		 * Find the next isoc frame with data and count how many
2613		 * frames with data remain.
2614		 */
2615		seg_index = seg->isoc_frame_index;
2616		while (seg_index < seg->isoc_frame_count) {
2617			const int urb_frame_index =
2618				seg->isoc_frame_offset + seg_index;
2619
2620			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
2621				/* save the index of the next frame with data */
2622				if (!isoc_data_frame_count)
2623					seg->isoc_frame_index = seg_index;
2624				++isoc_data_frame_count;
2625			}
2626			++seg_index;
2627		}
2628	}
2629	spin_unlock_irqrestore(&xfer->lock, flags);
2630
2631	switch (urb->status) {
2632	case 0:
2633		spin_lock_irqsave(&xfer->lock, flags);
2634
2635		seg->result += urb->actual_length;
2636		if (isoc_data_frame_count > 0) {
2637			int result, urb_frame_count;
2638
2639			/* submit a read URB for the next frame with data. */
2640			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
2641				 xfer, seg);
2642			/* advance index to start of next read URB. */
2643			seg->isoc_frame_index += urb_frame_count;
2644			++(wa->active_buf_in_urbs);
2645			result = usb_submit_urb(urb, GFP_ATOMIC);
2646			if (result < 0) {
2647				--(wa->active_buf_in_urbs);
2648				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
2649					result);
2650				wa_reset_all(wa);
2651			}
2652			/*
2653			 * If we are in this callback and
2654			 * isoc_data_frame_count > 0, it means that the dti_urb
2655			 * submission was delayed in wa_dti_cb.  Once
2656			 * we submit the last buf_in_urb, we can submit the
2657			 * delayed dti_urb.
2658			 */
2659			resubmit_dti = (isoc_data_frame_count ==
2660					urb_frame_count);
2661		} else if (active_buf_in_urbs == 0) {
2662			dev_dbg(dev,
2663				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
2664				xfer, wa_xfer_id(xfer), seg->index,
2665				seg->result);
2666			rpipe_ready = rpipe_avail_inc(rpipe);
2667			done = __wa_xfer_mark_seg_as_done(xfer, seg,
2668					WA_SEG_DONE);
2669		}
2670		spin_unlock_irqrestore(&xfer->lock, flags);
2671		if (done)
2672			wa_xfer_completion(xfer);
2673		if (rpipe_ready)
2674			wa_xfer_delayed_run(rpipe);
2675		break;
2676	case -ECONNRESET:	/* URB unlinked; no need to do anything */
2677	case -ENOENT:		/* as it was done by whoever unlinked us */
2678		break;
2679	default:		/* Other errors ... */
2680		/*
2681		 * Error on data buf read.  Only resubmit DTI if it hasn't
2682		 * already been done by previously hitting this error or by a
2683		 * successful completion of the previous buf_in_urb.
2684		 */
2685		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
2686		spin_lock_irqsave(&xfer->lock, flags);
2687		if (printk_ratelimit())
2688			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
2689				xfer, wa_xfer_id(xfer), seg->index,
2690				urb->status);
2691		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
2692			    EDC_ERROR_TIMEFRAME)){
2693			dev_err(dev, "DTO: URB max acceptable errors "
2694				"exceeded, resetting device\n");
2695			wa_reset_all(wa);
2696		}
2697		seg->result = urb->status;
2698		rpipe_ready = rpipe_avail_inc(rpipe);
2699		if (active_buf_in_urbs == 0)
2700			done = __wa_xfer_mark_seg_as_done(xfer, seg,
2701				WA_SEG_ERROR);
2702		else
2703			__wa_xfer_abort(xfer);
2704		spin_unlock_irqrestore(&xfer->lock, flags);
2705		if (done)
2706			wa_xfer_completion(xfer);
2707		if (rpipe_ready)
2708			wa_xfer_delayed_run(rpipe);
2709	}
2710
2711	if (resubmit_dti) {
2712		int result;
2713
2714		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
2715
2716		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2717		if (result < 0) {
2718			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2719				result);
2720			wa_reset_all(wa);
2721		}
2722	}
2723}
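
/*
 * Illustrative sketch (guarded out, not part of this driver): the
 * "check for unlink before touching state" rule stated before
 * wa_buf_in_cb() above.  A completion handler returns on
 * -ECONNRESET/-ENOENT before dereferencing anything the canceller may be
 * tearing down.  All demo_* names are hypothetical.
 */
#if 0
#include <linux/errno.h>
#include <linux/usb.h>

struct demo_seg;						/* hypothetical */
static void demo_seg_done(struct demo_seg *seg, int len);	/* hypothetical */
static void demo_seg_error(struct demo_seg *seg, int status);	/* hypothetical */

static void demo_buf_in_complete(struct urb *urb)
{
	struct demo_seg *seg = urb->context;

	switch (urb->status) {
	case -ECONNRESET:
	case -ENOENT:
		/* unlinked: whoever cancelled the URB owns the completion */
		return;
	case 0:
		demo_seg_done(seg, urb->actual_length);
		break;
	default:
		demo_seg_error(seg, urb->status);
		break;
	}
}
#endif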
2724
2725/*
2726 * Handle an incoming transfer result buffer
2727 *
2728 * Given a transfer result buffer, it completes the transfer (possibly
2729 * scheduling and buffer in read) and then resubmits the DTI URB for a
2730 * new transfer result read.
2731 *
2732 *
2733 * The xfer_result DTI URB state machine
2734 *
2735 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
2736 *
2737 * We start in OFF mode, the first xfer_result notification [through
2738 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
2739 * read.
2740 *
2741 * We receive a buffer -- if it is not a xfer_result, we complain and
2742 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
2743 * request accounting. If it is an IN segment, we move to RBI and post
2744 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
2745 * repost the DTI-URB and move to RXR state. If there was no IN
2746 * segment, it will repost the DTI-URB.
2747 *
2748 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
2749 * errors) in the URBs.
2750 */
2751static void wa_dti_cb(struct urb *urb)
2752{
2753	int result, dti_busy = 0;
2754	struct wahc *wa = urb->context;
2755	struct device *dev = &wa->usb_iface->dev;
2756	u32 xfer_id;
2757	u8 usb_status;
2758
2759	BUG_ON(wa->dti_urb != urb);
2760	switch (wa->dti_urb->status) {
2761	case 0:
2762		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
2763			struct wa_xfer_result *xfer_result;
2764			struct wa_xfer *xfer;
2765
2766			/* We have a xfer result buffer; check it */
2767			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
2768				urb->actual_length, urb->transfer_buffer);
2769			if (urb->actual_length != sizeof(*xfer_result)) {
2770				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
2771					urb->actual_length,
2772					sizeof(*xfer_result));
2773				break;
2774			}
2775			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
2776			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
2777				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
2778					xfer_result->hdr.bLength);
2779				break;
2780			}
2781			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
2782				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
2783					xfer_result->hdr.bNotifyType);
2784				break;
2785			}
2786			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
2787			usb_status = xfer_result->bTransferStatus & 0x3f;
2788			if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
2789				/* taken care of already */
2790				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
2791					__func__, xfer_id,
2792					xfer_result->bTransferSegment & 0x7f);
2793				break;
2794			}
2795			xfer = wa_xfer_get_by_id(wa, xfer_id);
2796			if (xfer == NULL) {
2797				/* FIXME: transaction not found. */
2798				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
2799					xfer_id, usb_status);
2800				break;
2801			}
2802			wa_xfer_result_chew(wa, xfer, xfer_result);
2803			wa_xfer_put(xfer);
2804		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
2805			dti_busy = wa_process_iso_packet_status(wa, urb);
2806		} else {
2807			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
2808				wa->dti_state);
2809		}
2810		break;
2811	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
2812	case -ESHUTDOWN:	/* going away! */
2813		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
2814		goto out;
2815	default:
2816		/* Unknown error */
2817		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
2818			    EDC_ERROR_TIMEFRAME)) {
2819			dev_err(dev, "DTI: URB max acceptable errors "
2820				"exceeded, resetting device\n");
2821			wa_reset_all(wa);
2822			goto out;
2823		}
2824		if (printk_ratelimit())
2825			dev_err(dev, "DTI: URB error %d\n", urb->status);
2826		break;
2827	}
2828
2829	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */
2830	if (!dti_busy) {
2831		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
2832		if (result < 0) {
2833			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
2834				result);
2835			wa_reset_all(wa);
2836		}
2837	}
2838out:
2839	return;
2840}
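
/*
 * Illustrative sketch (guarded out, not part of this driver): the
 * OFF / RXR / RBI state machine described before wa_dti_cb() above, reduced
 * to an enum and a single transition helper.  The real driver keeps the
 * equivalent state in wa->dti_state; all demo_* names are hypothetical.
 */
#if 0
#include <linux/types.h>

enum demo_dti_state {
	DEMO_DTI_OFF,	/* no DTI URB posted yet */
	DEMO_DTI_RXR,	/* Read-Xfer-Result: waiting for a transfer result */
	DEMO_DTI_RBI,	/* Read-Buffer-In: reading inbound segment data */
};

static enum demo_dti_state demo_dti_next(enum demo_dti_state cur,
					 bool in_result_with_data)
{
	switch (cur) {
	case DEMO_DTI_OFF:
		return DEMO_DTI_RXR;	/* first notification posts the DTI URB */
	case DEMO_DTI_RXR:
		/* an IN result with data schedules a buffer-in read */
		return in_result_with_data ? DEMO_DTI_RBI : DEMO_DTI_RXR;
	case DEMO_DTI_RBI:
		return DEMO_DTI_RXR;	/* buf-in read done: repost the DTI URB */
	}
	return cur;
}
#endif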
2841
2842/*
2843 * Initialize the DTI URB for reading transfer result notifications and also
2844 * the buffer-in URB, for reading buffers. Then we just submit the DTI URB.
2845 */
2846int wa_dti_start(struct wahc *wa)
2847{
2848	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2849	struct device *dev = &wa->usb_iface->dev;
2850	int result = -ENOMEM, index;
2851
2852	if (wa->dti_urb != NULL)	/* DTI URB already started */
2853		goto out;
2854
2855	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
2856	if (wa->dti_urb == NULL)
2857		goto error_dti_urb_alloc;
2858	usb_fill_bulk_urb(
2859		wa->dti_urb, wa->usb_dev,
2860		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
2861		wa->dti_buf, wa->dti_buf_size,
2862		wa_dti_cb, wa);
2863
2864	/* init the buf in URBs */
2865	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
2866		usb_fill_bulk_urb(
2867			&(wa->buf_in_urbs[index]), wa->usb_dev,
2868			usb_rcvbulkpipe(wa->usb_dev,
2869				0x80 | dti_epd->bEndpointAddress),
2870			NULL, 0, wa_buf_in_cb, wa);
2871	}
2872	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
2873	if (result < 0) {
2874		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
2875			result);
2876		goto error_dti_urb_submit;
2877	}
2878out:
2879	return 0;
2880
2881error_dti_urb_submit:
2882	usb_put_urb(wa->dti_urb);
2883	wa->dti_urb = NULL;
2884error_dti_urb_alloc:
2885	return result;
2886}
2887EXPORT_SYMBOL_GPL(wa_dti_start);
2888/*
2889 * Transfer complete notification
2890 *
2891 * Called from the notif.c code. We get a notification on EP2 saying
2892 * that some endpoint has some transfer result data available. We are
2893 * about to read it.
2894 *
2895 * To speed things up, we always have a URB reading the DTI endpoint; we
2896 * don't really set it up and start it until the first xfer complete
2897 * notification arrives, which is what we do here.
2898 *
2899 * Follow up in wa_dti_cb(), as that's where the whole state
2900 * machine starts.
2901 *
2902 * @wa shall be referenced
2903 */
2904void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
2905{
2906	struct device *dev = &wa->usb_iface->dev;
2907	struct wa_notif_xfer *notif_xfer;
2908	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
2909
2910	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
2911	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
2912
2913	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
2914		/* FIXME: hardcoded limitation, adapt */
2915		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
2916			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
2917		goto error;
2918	}
2919
2920	/* attempt to start the DTI ep processing. */
2921	if (wa_dti_start(wa) < 0)
2922		goto error;
2923
2924	return;
2925
2926error:
2927	wa_reset_all(wa);
2928}
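
/*
 * Illustrative sketch (guarded out, not part of this driver): the lazy,
 * idempotent start performed by wa_dti_start() above -- allocate and submit
 * the long-lived read URB only on first use and make repeat calls cheap
 * no-ops.  struct demo_hc and the demo_* helpers are hypothetical.
 */
#if 0
#include <linux/slab.h>
#include <linux/usb.h>

struct demo_hc {			/* hypothetical per-host state */
	struct usb_device *udev;
	struct urb *read_urb;
	unsigned int read_pipe;
	void *read_buf;
	int read_buf_size;
};

static void demo_reader_cb(struct urb *urb);	/* hypothetical */

static int demo_reader_start(struct demo_hc *hc)
{
	if (hc->read_urb)		/* already started: nothing to do */
		return 0;

	hc->read_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!hc->read_urb)
		return -ENOMEM;
	usb_fill_bulk_urb(hc->read_urb, hc->udev, hc->read_pipe,
			  hc->read_buf, hc->read_buf_size,
			  demo_reader_cb, hc);
	return usb_submit_urb(hc->read_urb, GFP_KERNEL);
}
#endif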