   1/*
   2 * Universal Host Controller Interface driver for USB.
   3 *
   4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
   5 *
   6 * (C) Copyright 1999 Linus Torvalds
   7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
   8 * (C) Copyright 1999 Randy Dunlap
   9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
  10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
  11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
  12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
  13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
  14 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
  15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
  16 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
  17 */
  18
  19
  20/*
  21 * Technically, updating td->status here is a race, but it's not really a
  22 * problem. The worst that can happen is that we set the IOC bit again
  23 * generating a spurious interrupt. We could fix this by creating another
  24 * QH and leaving the IOC bit always set, but then we would have to play
  25 * games with the FSBR code to make sure we get the correct order in all
  26 * the cases. I don't think it's worth the effort.
  27 */
  28static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
  29{
  30	if (uhci->is_stopped)
  31		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
  32	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
  33}
  34
  35static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
  36{
  37	uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
  38}
  39
  40
  41/*
  42 * Full-Speed Bandwidth Reclamation (FSBR).
  43 * We turn on FSBR whenever a queue that wants it is advancing,
  44 * and leave it on for a short time thereafter.
  45 */
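/*
 * Illustrative note: FSBR in effect turns the async schedule into a loop.
 * uhci_fsbr_on() below makes the last async QH point at the terminating
 * skeleton QH, and that skeleton QH always points back at the first
 * full-speed control/bulk QH, so the controller keeps re-walking those
 * queues for the rest of each frame instead of idling after one pass.
 */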
  46static void uhci_fsbr_on(struct uhci_hcd *uhci)
  47{
  48	struct uhci_qh *lqh;
  49
  50	/* The terminating skeleton QH always points back to the first
  51	 * FSBR QH.  Make the last async QH point to the terminating
  52	 * skeleton QH. */
  53	uhci->fsbr_is_on = 1;
  54	lqh = list_entry(uhci->skel_async_qh->node.prev,
  55			struct uhci_qh, node);
  56	lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
  57}
  58
  59static void uhci_fsbr_off(struct uhci_hcd *uhci)
  60{
  61	struct uhci_qh *lqh;
  62
  63	/* Remove the link from the last async QH to the terminating
  64	 * skeleton QH. */
  65	uhci->fsbr_is_on = 0;
  66	lqh = list_entry(uhci->skel_async_qh->node.prev,
  67			struct uhci_qh, node);
  68	lqh->link = UHCI_PTR_TERM(uhci);
  69}
  70
  71static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
  72{
  73	struct urb_priv *urbp = urb->hcpriv;
  74
  75	if (!(urb->transfer_flags & URB_NO_FSBR))
  76		urbp->fsbr = 1;
  77}
  78
  79static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
  80{
  81	if (urbp->fsbr) {
  82		uhci->fsbr_is_wanted = 1;
  83		if (!uhci->fsbr_is_on)
  84			uhci_fsbr_on(uhci);
  85		else if (uhci->fsbr_expiring) {
  86			uhci->fsbr_expiring = 0;
  87			del_timer(&uhci->fsbr_timer);
  88		}
  89	}
  90}
  91
  92static void uhci_fsbr_timeout(unsigned long _uhci)
  93{
  94	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
  95	unsigned long flags;
  96
  97	spin_lock_irqsave(&uhci->lock, flags);
  98	if (uhci->fsbr_expiring) {
  99		uhci->fsbr_expiring = 0;
 100		uhci_fsbr_off(uhci);
 101	}
 102	spin_unlock_irqrestore(&uhci->lock, flags);
 103}
 104
 105
 106static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
 107{
 108	dma_addr_t dma_handle;
 109	struct uhci_td *td;
 110
 111	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
 112	if (!td)
 113		return NULL;
 114
 115	td->dma_handle = dma_handle;
 116	td->frame = -1;
 117
 118	INIT_LIST_HEAD(&td->list);
 119	INIT_LIST_HEAD(&td->fl_list);
 120
 121	return td;
 122}
 123
 124static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
 125{
 126	if (!list_empty(&td->list))
 127		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
 128	if (!list_empty(&td->fl_list))
 129		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
 130
 131	dma_pool_free(uhci->td_pool, td, td->dma_handle);
 132}
 133
 134static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
 135		u32 status, u32 token, u32 buffer)
 136{
 137	td->status = cpu_to_hc32(uhci, status);
 138	td->token = cpu_to_hc32(uhci, token);
 139	td->buffer = cpu_to_hc32(uhci, buffer);
 140}
 141
 142static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
 143{
 144	list_add_tail(&td->list, &urbp->td_list);
 145}
 146
 147static void uhci_remove_td_from_urbp(struct uhci_td *td)
 148{
 149	list_del_init(&td->list);
 150}
 151
 152/*
 153 * We insert Isochronous URBs directly into the frame list at the beginning
 154 */
 155static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
 156		struct uhci_td *td, unsigned framenum)
 157{
 158	framenum &= (UHCI_NUMFRAMES - 1);
 159
 160	td->frame = framenum;
 161
 162	/* Is there a TD already mapped there? */
 163	if (uhci->frame_cpu[framenum]) {
 164		struct uhci_td *ftd, *ltd;
 165
 166		ftd = uhci->frame_cpu[framenum];
 167		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
 168
 169		list_add_tail(&td->fl_list, &ftd->fl_list);
 170
 171		td->link = ltd->link;
 172		wmb();
 173		ltd->link = LINK_TO_TD(uhci, td);
 174	} else {
 175		td->link = uhci->frame[framenum];
 176		wmb();
 177		uhci->frame[framenum] = LINK_TO_TD(uhci, td);
 178		uhci->frame_cpu[framenum] = td;
 179	}
 180}
 181
 182static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
 183		struct uhci_td *td)
 184{
 185	/* If it's not inserted, don't remove it */
 186	if (td->frame == -1) {
 187		WARN_ON(!list_empty(&td->fl_list));
 188		return;
 189	}
 190
 191	if (uhci->frame_cpu[td->frame] == td) {
 192		if (list_empty(&td->fl_list)) {
 193			uhci->frame[td->frame] = td->link;
 194			uhci->frame_cpu[td->frame] = NULL;
 195		} else {
 196			struct uhci_td *ntd;
 197
 198			ntd = list_entry(td->fl_list.next,
 199					 struct uhci_td,
 200					 fl_list);
 201			uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
 202			uhci->frame_cpu[td->frame] = ntd;
 203		}
 204	} else {
 205		struct uhci_td *ptd;
 206
 207		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
 208		ptd->link = td->link;
 209	}
 210
 211	list_del_init(&td->fl_list);
 212	td->frame = -1;
 213}
 214
 215static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
 216		unsigned int framenum)
 217{
 218	struct uhci_td *ftd, *ltd;
 219
 220	framenum &= (UHCI_NUMFRAMES - 1);
 221
 222	ftd = uhci->frame_cpu[framenum];
 223	if (ftd) {
 224		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
 225		uhci->frame[framenum] = ltd->link;
 226		uhci->frame_cpu[framenum] = NULL;
 227
 228		while (!list_empty(&ftd->fl_list))
 229			list_del_init(ftd->fl_list.prev);
 230	}
 231}
 232
 233/*
 234 * Remove all the TDs for an Isochronous URB from the frame list
 235 */
 236static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
 237{
 238	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
 239	struct uhci_td *td;
 240
 241	list_for_each_entry(td, &urbp->td_list, list)
 242		uhci_remove_td_from_frame_list(uhci, td);
 243}
 244
 245static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 246		struct usb_device *udev, struct usb_host_endpoint *hep)
 247{
 248	dma_addr_t dma_handle;
 249	struct uhci_qh *qh;
 250
 251	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
 252	if (!qh)
 253		return NULL;
 254
 255	memset(qh, 0, sizeof(*qh));
 256	qh->dma_handle = dma_handle;
 257
 258	qh->element = UHCI_PTR_TERM(uhci);
 259	qh->link = UHCI_PTR_TERM(uhci);
 260
 261	INIT_LIST_HEAD(&qh->queue);
 262	INIT_LIST_HEAD(&qh->node);
 263
 264	if (udev) {		/* Normal QH */
 265		qh->type = usb_endpoint_type(&hep->desc);
 266		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
 267			qh->dummy_td = uhci_alloc_td(uhci);
 268			if (!qh->dummy_td) {
 269				dma_pool_free(uhci->qh_pool, qh, dma_handle);
 270				return NULL;
 271			}
 272		}
 273		qh->state = QH_STATE_IDLE;
 274		qh->hep = hep;
 275		qh->udev = udev;
 276		hep->hcpriv = qh;
 277
 278		if (qh->type == USB_ENDPOINT_XFER_INT ||
 279				qh->type == USB_ENDPOINT_XFER_ISOC)
 280			qh->load = usb_calc_bus_time(udev->speed,
 281					usb_endpoint_dir_in(&hep->desc),
 282					qh->type == USB_ENDPOINT_XFER_ISOC,
 283					le16_to_cpu(hep->desc.wMaxPacketSize))
 284				/ 1000 + 1;
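			/* usb_calc_bus_time() reports worst-case bus time in
			 * nanoseconds; dividing by 1000 and adding 1 stores
			 * the per-frame load as whole microseconds, rounded
			 * up. */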
 285
 286	} else {		/* Skeleton QH */
 287		qh->state = QH_STATE_ACTIVE;
 288		qh->type = -1;
 289	}
 290	return qh;
 291}
 292
 293static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 294{
 295	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
 296	if (!list_empty(&qh->queue))
 297		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
 298
 299	list_del(&qh->node);
 300	if (qh->udev) {
 301		qh->hep->hcpriv = NULL;
 302		if (qh->dummy_td)
 303			uhci_free_td(uhci, qh->dummy_td);
 304	}
 305	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
 306}
 307
 308/*
 309 * When a queue is stopped and a dequeued URB is given back, adjust
 310 * the previous TD link (if the URB isn't first on the queue) or
 311 * save its toggle value (if it is first and is currently executing).
 312 *
 313 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 314 */
 315static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
 316		struct urb *urb)
 317{
 318	struct urb_priv *urbp = urb->hcpriv;
 319	struct uhci_td *td;
 320	int ret = 1;
 321
 322	/* Isochronous pipes don't use toggles and their TD link pointers
 323	 * get adjusted during uhci_urb_dequeue().  But since their queues
 324	 * cannot truly be stopped, we have to watch out for dequeues
 325	 * occurring after the nominal unlink frame. */
 326	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
 327		ret = (uhci->frame_number + uhci->is_stopped !=
 328				qh->unlink_frame);
 329		goto done;
 330	}
 331
 332	/* If the URB isn't first on its queue, adjust the link pointer
 333	 * of the last TD in the previous URB.  The toggle doesn't need
 334	 * to be saved since this URB can't be executing yet. */
 335	if (qh->queue.next != &urbp->node) {
 336		struct urb_priv *purbp;
 337		struct uhci_td *ptd;
 338
 339		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
 340		WARN_ON(list_empty(&purbp->td_list));
 341		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
 342				list);
 343		td = list_entry(urbp->td_list.prev, struct uhci_td,
 344				list);
 345		ptd->link = td->link;
 346		goto done;
 347	}
 348
  349	/* If the QH element pointer is UHCI_PTR_TERM then the currently
 350	 * executing URB has already been unlinked, so this one isn't it. */
 351	if (qh_element(qh) == UHCI_PTR_TERM(uhci))
 352		goto done;
 353	qh->element = UHCI_PTR_TERM(uhci);
 354
 355	/* Control pipes don't have to worry about toggles */
 356	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
 357		goto done;
 358
 359	/* Save the next toggle value */
 360	WARN_ON(list_empty(&urbp->td_list));
 361	td = list_entry(urbp->td_list.next, struct uhci_td, list);
 362	qh->needs_fixup = 1;
 363	qh->initial_toggle = uhci_toggle(td_token(uhci, td));
 364
 365done:
 366	return ret;
 367}
 368
 369/*
 370 * Fix up the data toggles for URBs in a queue, when one of them
 371 * terminates early (short transfer, error, or dequeued).
 372 */
 373static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
 374			int skip_first)
 375{
 376	struct urb_priv *urbp = NULL;
 377	struct uhci_td *td;
 378	unsigned int toggle = qh->initial_toggle;
 379	unsigned int pipe;
 380
 381	/* Fixups for a short transfer start with the second URB in the
 382	 * queue (the short URB is the first). */
 383	if (skip_first)
 384		urbp = list_entry(qh->queue.next, struct urb_priv, node);
 385
 386	/* When starting with the first URB, if the QH element pointer is
 387	 * still valid then we know the URB's toggles are okay. */
 388	else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
 389		toggle = 2;
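	/* Illustrative note: 2 is not a valid toggle value; it acts as a
	 * sentinel meaning "known good", so the loop below leaves the first
	 * URB's TDs alone and then resumes normal tracking from its last TD. */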
 390
 391	/* Fix up the toggle for the URBs in the queue.  Normally this
 392	 * loop won't run more than once: When an error or short transfer
 393	 * occurs, the queue usually gets emptied. */
 394	urbp = list_prepare_entry(urbp, &qh->queue, node);
 395	list_for_each_entry_continue(urbp, &qh->queue, node) {
 396
 397		/* If the first TD has the right toggle value, we don't
 398		 * need to change any toggles in this URB */
 399		td = list_entry(urbp->td_list.next, struct uhci_td, list);
 400		if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
 401			td = list_entry(urbp->td_list.prev, struct uhci_td,
 402					list);
 403			toggle = uhci_toggle(td_token(uhci, td)) ^ 1;
 404
 405		/* Otherwise all the toggles in the URB have to be switched */
 406		} else {
 407			list_for_each_entry(td, &urbp->td_list, list) {
 408				td->token ^= cpu_to_hc32(uhci,
 409							TD_TOKEN_TOGGLE);
 410				toggle ^= 1;
 411			}
 412		}
 413	}
 414
 415	wmb();
 416	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
 417	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
 418			usb_pipeout(pipe), toggle);
 419	qh->needs_fixup = 0;
 420}
 421
 422/*
 423 * Link an Isochronous QH into its skeleton's list
 424 */
 425static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
 426{
 427	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
 428
 429	/* Isochronous QHs aren't linked by the hardware */
 430}
 431
 432/*
 433 * Link a high-period interrupt QH into the schedule at the end of its
 434 * skeleton's list
 435 */
 436static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
 437{
 438	struct uhci_qh *pqh;
 439
 440	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
 441
 442	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
 443	qh->link = pqh->link;
 444	wmb();
 445	pqh->link = LINK_TO_QH(uhci, qh);
 446}
 447
 448/*
 449 * Link a period-1 interrupt or async QH into the schedule at the
 450 * correct spot in the async skeleton's list, and update the FSBR link
 451 */
 452static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
 453{
 454	struct uhci_qh *pqh;
 455	__hc32 link_to_new_qh;
 456
 457	/* Find the predecessor QH for our new one and insert it in the list.
 458	 * The list of QHs is expected to be short, so linear search won't
 459	 * take too long. */
 460	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
 461		if (pqh->skel <= qh->skel)
 462			break;
 463	}
 464	list_add(&qh->node, &pqh->node);
 465
 466	/* Link it into the schedule */
 467	qh->link = pqh->link;
 468	wmb();
 469	link_to_new_qh = LINK_TO_QH(uhci, qh);
 470	pqh->link = link_to_new_qh;
 471
 472	/* If this is now the first FSBR QH, link the terminating skeleton
 473	 * QH to it. */
 474	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
 475		uhci->skel_term_qh->link = link_to_new_qh;
 476}
 477
 478/*
 479 * Put a QH on the schedule in both hardware and software
 480 */
 481static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 482{
 483	WARN_ON(list_empty(&qh->queue));
 484
 485	/* Set the element pointer if it isn't set already.
 486	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
 487	if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
 488		struct urb_priv *urbp = list_entry(qh->queue.next,
 489				struct urb_priv, node);
 490		struct uhci_td *td = list_entry(urbp->td_list.next,
 491				struct uhci_td, list);
 492
 493		qh->element = LINK_TO_TD(uhci, td);
 494	}
 495
 496	/* Treat the queue as if it has just advanced */
 497	qh->wait_expired = 0;
 498	qh->advance_jiffies = jiffies;
 499
 500	if (qh->state == QH_STATE_ACTIVE)
 501		return;
 502	qh->state = QH_STATE_ACTIVE;
 503
 504	/* Move the QH from its old list to the correct spot in the appropriate
 505	 * skeleton's list */
 506	if (qh == uhci->next_qh)
 507		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 508				node);
 509	list_del(&qh->node);
 510
 511	if (qh->skel == SKEL_ISO)
 512		link_iso(uhci, qh);
 513	else if (qh->skel < SKEL_ASYNC)
 514		link_interrupt(uhci, qh);
 515	else
 516		link_async(uhci, qh);
 517}
 518
 519/*
 520 * Unlink a high-period interrupt QH from the schedule
 521 */
 522static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
 523{
 524	struct uhci_qh *pqh;
 525
 526	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
 527	pqh->link = qh->link;
 528	mb();
 529}
 530
 531/*
 532 * Unlink a period-1 interrupt or async QH from the schedule
 533 */
 534static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
 535{
 536	struct uhci_qh *pqh;
 537	__hc32 link_to_next_qh = qh->link;
 538
 539	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
 540	pqh->link = link_to_next_qh;
 541
 542	/* If this was the old first FSBR QH, link the terminating skeleton
 543	 * QH to the next (new first FSBR) QH. */
 544	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
 545		uhci->skel_term_qh->link = link_to_next_qh;
 546	mb();
 547}
 548
 549/*
 550 * Take a QH off the hardware schedule
 551 */
 552static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 553{
 554	if (qh->state == QH_STATE_UNLINKING)
 555		return;
 556	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
 557	qh->state = QH_STATE_UNLINKING;
 558
 559	/* Unlink the QH from the schedule and record when we did it */
 560	if (qh->skel == SKEL_ISO)
 561		;
 562	else if (qh->skel < SKEL_ASYNC)
 563		unlink_interrupt(uhci, qh);
 564	else
 565		unlink_async(uhci, qh);
 566
 567	uhci_get_current_frame_number(uhci);
 568	qh->unlink_frame = uhci->frame_number;
 569
 570	/* Force an interrupt so we know when the QH is fully unlinked */
 571	if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
 572		uhci_set_next_interrupt(uhci);
 573
 574	/* Move the QH from its old list to the end of the unlinking list */
 575	if (qh == uhci->next_qh)
 576		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 577				node);
 578	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
 579}
 580
 581/*
 582 * When we and the controller are through with a QH, it becomes IDLE.
 583 * This happens when a QH has been off the schedule (on the unlinking
 584 * list) for more than one frame, or when an error occurs while adding
 585 * the first URB onto a new QH.
 586 */
 587static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
 588{
 589	WARN_ON(qh->state == QH_STATE_ACTIVE);
 590
 591	if (qh == uhci->next_qh)
 592		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 593				node);
 594	list_move(&qh->node, &uhci->idle_qh_list);
 595	qh->state = QH_STATE_IDLE;
 596
 597	/* Now that the QH is idle, its post_td isn't being used */
 598	if (qh->post_td) {
 599		uhci_free_td(uhci, qh->post_td);
 600		qh->post_td = NULL;
 601	}
 602
 603	/* If anyone is waiting for a QH to become idle, wake them up */
 604	if (uhci->num_waiting)
 605		wake_up_all(&uhci->waitqh);
 606}
 607
 608/*
 609 * Find the highest existing bandwidth load for a given phase and period.
 610 */
 611static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
 612{
 613	int highest_load = uhci->load[phase];
 614
 615	for (phase += period; phase < MAX_PHASE; phase += period)
 616		highest_load = max_t(int, highest_load, uhci->load[phase]);
 617	return highest_load;
 618}
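/*
 * Illustrative example with made-up numbers: for phase 1 and period 4 the
 * loop above scans uhci->load[1], load[5], load[9], ... up to MAX_PHASE and
 * returns the largest of those per-frame loads, i.e. the busiest frame this
 * transfer would have to share.
 */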
 619
 620/*
 621 * Set qh->phase to the optimal phase for a periodic transfer and
 622 * check whether the bandwidth requirement is acceptable.
 623 */
 624static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
 625{
 626	int minimax_load;
 627
 628	/* Find the optimal phase (unless it is already set) and get
 629	 * its load value. */
 630	if (qh->phase >= 0)
 631		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
 632	else {
 633		int phase, load;
 634		int max_phase = min_t(int, MAX_PHASE, qh->period);
 635
 636		qh->phase = 0;
 637		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
 638		for (phase = 1; phase < max_phase; ++phase) {
 639			load = uhci_highest_load(uhci, phase, qh->period);
 640			if (load < minimax_load) {
 641				minimax_load = load;
 642				qh->phase = phase;
 643			}
 644		}
 645	}
 646
 647	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
 648	if (minimax_load + qh->load > 900) {
 649		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
 650				"period %d, phase %d, %d + %d us\n",
 651				qh->period, qh->phase, minimax_load, qh->load);
 652		return -ENOSPC;
 653	}
 654	return 0;
 655}
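/*
 * Illustrative example: for a new qh with period 8 and no phase chosen yet,
 * the loop above evaluates phases 0..7, keeps the phase whose busiest frame
 * carries the least load, and then refuses the request with -ENOSPC if that
 * minimax load plus qh->load would push any frame past 900 us (90% of the
 * 1 ms frame).
 */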
 656
 657/*
 658 * Reserve a periodic QH's bandwidth in the schedule
 659 */
 660static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
 661{
 662	int i;
 663	int load = qh->load;
 664	char *p = "??";
 665
 666	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
 667		uhci->load[i] += load;
 668		uhci->total_load += load;
 669	}
 670	uhci_to_hcd(uhci)->self.bandwidth_allocated =
 671			uhci->total_load / MAX_PHASE;
 672	switch (qh->type) {
 673	case USB_ENDPOINT_XFER_INT:
 674		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
 675		p = "INT";
 676		break;
 677	case USB_ENDPOINT_XFER_ISOC:
 678		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
 679		p = "ISO";
 680		break;
 681	}
 682	qh->bandwidth_reserved = 1;
 683	dev_dbg(uhci_dev(uhci),
 684			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
 685			"reserve", qh->udev->devnum,
 686			qh->hep->desc.bEndpointAddress, p,
 687			qh->period, qh->phase, load);
 688}
 689
 690/*
 691 * Release a periodic QH's bandwidth reservation
 692 */
 693static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
 694{
 695	int i;
 696	int load = qh->load;
 697	char *p = "??";
 698
 699	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
 700		uhci->load[i] -= load;
 701		uhci->total_load -= load;
 702	}
 703	uhci_to_hcd(uhci)->self.bandwidth_allocated =
 704			uhci->total_load / MAX_PHASE;
 705	switch (qh->type) {
 706	case USB_ENDPOINT_XFER_INT:
 707		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
 708		p = "INT";
 709		break;
 710	case USB_ENDPOINT_XFER_ISOC:
 711		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
 712		p = "ISO";
 713		break;
 714	}
 715	qh->bandwidth_reserved = 0;
 716	dev_dbg(uhci_dev(uhci),
 717			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
 718			"release", qh->udev->devnum,
 719			qh->hep->desc.bEndpointAddress, p,
 720			qh->period, qh->phase, load);
 721}
 722
 723static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 724		struct urb *urb)
 725{
 726	struct urb_priv *urbp;
 727
 728	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
 729	if (!urbp)
 730		return NULL;
 731
 732	urbp->urb = urb;
 733	urb->hcpriv = urbp;
 734
 735	INIT_LIST_HEAD(&urbp->node);
 736	INIT_LIST_HEAD(&urbp->td_list);
 737
 738	return urbp;
 739}
 740
 741static void uhci_free_urb_priv(struct uhci_hcd *uhci,
 742		struct urb_priv *urbp)
 743{
 744	struct uhci_td *td, *tmp;
 745
 746	if (!list_empty(&urbp->node))
 747		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
 748				urbp->urb);
 749
 750	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
 751		uhci_remove_td_from_urbp(td);
 752		uhci_free_td(uhci, td);
 753	}
 754
 755	kmem_cache_free(uhci_up_cachep, urbp);
 756}
 757
 758/*
 759 * Map status to standard result codes
 760 *
 761 * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
 762 * uhci_status_bits(td_status(uhci, td)).
 763 * Note: <status> does not include the TD_CTRL_NAK bit.
 764 * <dir_out> is True for output TDs and False for input TDs.
 765 */
 766static int uhci_map_status(int status, int dir_out)
 767{
 768	if (!status)
 769		return 0;
 770	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
 771		return -EPROTO;
 772	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
 773		if (dir_out)
 774			return -EPROTO;
 775		else
 776			return -EILSEQ;
 777	}
 778	if (status & TD_CTRL_BABBLE)			/* Babble */
 779		return -EOVERFLOW;
 780	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
 781		return -ENOSR;
 782	if (status & TD_CTRL_STALLED)			/* Stalled */
 783		return -EPIPE;
 784	return 0;
 785}
 786
 787/*
 788 * Control transfers
 789 */
 790static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 791		struct uhci_qh *qh)
 792{
 793	struct uhci_td *td;
 794	unsigned long destination, status;
 795	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
 796	int len = urb->transfer_buffer_length;
 797	dma_addr_t data = urb->transfer_dma;
 798	__hc32 *plink;
 799	struct urb_priv *urbp = urb->hcpriv;
 800	int skel;
 801
 802	/* The "pipe" thing contains the destination in bits 8--18 */
 803	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
 804
 805	/* 3 errors, dummy TD remains inactive */
 806	status = uhci_maxerr(3);
 807	if (urb->dev->speed == USB_SPEED_LOW)
 808		status |= TD_CTRL_LS;
 809
 810	/*
 811	 * Build the TD for the control request setup packet
 812	 */
 813	td = qh->dummy_td;
 814	uhci_add_td_to_urbp(td, urbp);
 815	uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
 816			urb->setup_dma);
 817	plink = &td->link;
 818	status |= TD_CTRL_ACTIVE;
 819
 820	/*
 821	 * If direction is "send", change the packet ID from SETUP (0x2D)
 822	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
 823	 * set Short Packet Detect (SPD) for all data packets.
 824	 *
 825	 * 0-length transfers always get treated as "send".
 826	 */
 827	if (usb_pipeout(urb->pipe) || len == 0)
 828		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
 829	else {
 830		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
 831		status |= TD_CTRL_SPD;
 832	}
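	/* Illustrative arithmetic: the low byte of destination starts out as
	 * USB_PID_SETUP (0x2D).  XORing with (USB_PID_SETUP ^ USB_PID_OUT),
	 * i.e. 0x2D ^ 0xE1 = 0xCC, swaps it to 0xE1 (OUT) while leaving the
	 * address and endpoint bits untouched; the IN case works the same
	 * way. */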
 833
 834	/*
 835	 * Build the DATA TDs
 836	 */
 837	while (len > 0) {
 838		int pktsze = maxsze;
 839
 840		if (len <= pktsze) {		/* The last data packet */
 841			pktsze = len;
 842			status &= ~TD_CTRL_SPD;
 843		}
 844
 845		td = uhci_alloc_td(uhci);
 846		if (!td)
 847			goto nomem;
 848		*plink = LINK_TO_TD(uhci, td);
 849
 850		/* Alternate Data0/1 (start with Data1) */
 851		destination ^= TD_TOKEN_TOGGLE;
 852
 853		uhci_add_td_to_urbp(td, urbp);
 854		uhci_fill_td(uhci, td, status,
 855			destination | uhci_explen(pktsze), data);
 856		plink = &td->link;
 857
 858		data += pktsze;
 859		len -= pktsze;
 860	}
 861
 862	/*
 863	 * Build the final TD for control status
 864	 */
 865	td = uhci_alloc_td(uhci);
 866	if (!td)
 867		goto nomem;
 868	*plink = LINK_TO_TD(uhci, td);
 869
 870	/* Change direction for the status transaction */
 871	destination ^= (USB_PID_IN ^ USB_PID_OUT);
 872	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */
 873
 874	uhci_add_td_to_urbp(td, urbp);
 875	uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
 876			destination | uhci_explen(0), 0);
 877	plink = &td->link;
 878
 879	/*
 880	 * Build the new dummy TD and activate the old one
 881	 */
 882	td = uhci_alloc_td(uhci);
 883	if (!td)
 884		goto nomem;
 885	*plink = LINK_TO_TD(uhci, td);
 886
 887	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
 888	wmb();
 889	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
 890	qh->dummy_td = td;
 891
 892	/* Low-speed transfers get a different queue, and won't hog the bus.
 893	 * Also, some devices enumerate better without FSBR; the easiest way
 894	 * to do that is to put URBs on the low-speed queue while the device
 895	 * isn't in the CONFIGURED state. */
 896	if (urb->dev->speed == USB_SPEED_LOW ||
 897			urb->dev->state != USB_STATE_CONFIGURED)
 898		skel = SKEL_LS_CONTROL;
 899	else {
 900		skel = SKEL_FS_CONTROL;
 901		uhci_add_fsbr(uhci, urb);
 902	}
 903	if (qh->state != QH_STATE_ACTIVE)
 904		qh->skel = skel;
 905	return 0;
 906
 907nomem:
 908	/* Remove the dummy TD from the td_list so it doesn't get freed */
 909	uhci_remove_td_from_urbp(qh->dummy_td);
 910	return -ENOMEM;
 911}
 912
 913/*
 914 * Common submit for bulk and interrupt
 915 */
 916static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 917		struct uhci_qh *qh)
 918{
 919	struct uhci_td *td;
 920	unsigned long destination, status;
 921	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
 922	int len = urb->transfer_buffer_length;
 923	int this_sg_len;
 924	dma_addr_t data;
 925	__hc32 *plink;
 926	struct urb_priv *urbp = urb->hcpriv;
 927	unsigned int toggle;
 928	struct scatterlist  *sg;
 929	int i;
 930
 931	if (len < 0)
 932		return -EINVAL;
 933
 934	/* The "pipe" thing contains the destination in bits 8--18 */
 935	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
 936	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
 937			 usb_pipeout(urb->pipe));
 938
 939	/* 3 errors, dummy TD remains inactive */
 940	status = uhci_maxerr(3);
 941	if (urb->dev->speed == USB_SPEED_LOW)
 942		status |= TD_CTRL_LS;
 943	if (usb_pipein(urb->pipe))
 944		status |= TD_CTRL_SPD;
 945
 946	i = urb->num_sgs;
 947	if (len > 0 && i > 0) {
 948		sg = urb->sg;
 949		data = sg_dma_address(sg);
 950
 951		/* urb->transfer_buffer_length may be smaller than the
 952		 * size of the scatterlist (or vice versa)
 953		 */
 954		this_sg_len = min_t(int, sg_dma_len(sg), len);
 955	} else {
 956		sg = NULL;
 957		data = urb->transfer_dma;
 958		this_sg_len = len;
 959	}
 960	/*
 961	 * Build the DATA TDs
 962	 */
 963	plink = NULL;
 964	td = qh->dummy_td;
 965	for (;;) {	/* Allow zero length packets */
 966		int pktsze = maxsze;
 967
 968		if (len <= pktsze) {		/* The last packet */
 969			pktsze = len;
 970			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
 971				status &= ~TD_CTRL_SPD;
 972		}
 973
 974		if (plink) {
 975			td = uhci_alloc_td(uhci);
 976			if (!td)
 977				goto nomem;
 978			*plink = LINK_TO_TD(uhci, td);
 979		}
 980		uhci_add_td_to_urbp(td, urbp);
 981		uhci_fill_td(uhci, td, status,
 982				destination | uhci_explen(pktsze) |
 983					(toggle << TD_TOKEN_TOGGLE_SHIFT),
 984				data);
 985		plink = &td->link;
 986		status |= TD_CTRL_ACTIVE;
 987
 988		toggle ^= 1;
 989		data += pktsze;
 990		this_sg_len -= pktsze;
 991		len -= maxsze;
 992		if (this_sg_len <= 0) {
 993			if (--i <= 0 || len <= 0)
 994				break;
 995			sg = sg_next(sg);
 996			data = sg_dma_address(sg);
 997			this_sg_len = min_t(int, sg_dma_len(sg), len);
 998		}
 999	}
1000
1001	/*
1002	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
1003	 * is OUT and the transfer_length was an exact multiple of maxsze,
 1004	 * hence (len = transfer_length - N * maxsze) == 0.
 1005	 * However, if transfer_length == 0, the zero packet was already
1006	 * prepared above.
1007	 */
1008	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
1009			usb_pipeout(urb->pipe) && len == 0 &&
1010			urb->transfer_buffer_length > 0) {
1011		td = uhci_alloc_td(uhci);
1012		if (!td)
1013			goto nomem;
1014		*plink = LINK_TO_TD(uhci, td);
1015
1016		uhci_add_td_to_urbp(td, urbp);
1017		uhci_fill_td(uhci, td, status,
1018				destination | uhci_explen(0) |
1019					(toggle << TD_TOKEN_TOGGLE_SHIFT),
1020				data);
1021		plink = &td->link;
1022
1023		toggle ^= 1;
1024	}
1025
1026	/* Set the interrupt-on-completion flag on the last packet.
1027	 * A more-or-less typical 4 KB URB (= size of one memory page)
1028	 * will require about 3 ms to transfer; that's a little on the
1029	 * fast side but not enough to justify delaying an interrupt
1030	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1031	 * flag setting. */
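	/* Rough arithmetic behind the 3 ms figure above: full-speed bulk can
	 * move at most 19 64-byte packets (1216 bytes) per 1 ms frame, so a
	 * 4096-byte URB needs about 4096 / 1216 = 3.4 frames at best. */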
1032	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
1033
1034	/*
1035	 * Build the new dummy TD and activate the old one
1036	 */
1037	td = uhci_alloc_td(uhci);
1038	if (!td)
1039		goto nomem;
1040	*plink = LINK_TO_TD(uhci, td);
1041
1042	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
1043	wmb();
1044	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
1045	qh->dummy_td = td;
1046
1047	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1048			usb_pipeout(urb->pipe), toggle);
1049	return 0;
1050
1051nomem:
1052	/* Remove the dummy TD from the td_list so it doesn't get freed */
1053	uhci_remove_td_from_urbp(qh->dummy_td);
1054	return -ENOMEM;
1055}
1056
1057static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
1058		struct uhci_qh *qh)
1059{
1060	int ret;
1061
1062	/* Can't have low-speed bulk transfers */
1063	if (urb->dev->speed == USB_SPEED_LOW)
1064		return -EINVAL;
1065
1066	if (qh->state != QH_STATE_ACTIVE)
1067		qh->skel = SKEL_BULK;
1068	ret = uhci_submit_common(uhci, urb, qh);
1069	if (ret == 0)
1070		uhci_add_fsbr(uhci, urb);
1071	return ret;
1072}
1073
1074static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
1075		struct uhci_qh *qh)
1076{
1077	int ret;
1078
1079	/* USB 1.1 interrupt transfers only involve one packet per interval.
1080	 * Drivers can submit URBs of any length, but longer ones will need
1081	 * multiple intervals to complete.
1082	 */
1083
1084	if (!qh->bandwidth_reserved) {
1085		int exponent;
1086
1087		/* Figure out which power-of-two queue to use */
1088		for (exponent = 7; exponent >= 0; --exponent) {
1089			if ((1 << exponent) <= urb->interval)
1090				break;
1091		}
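		/* Example with made-up numbers: an URB with interval 10 picks
		 * exponent 3 (the largest power of two not above 10), so the
		 * first attempt below uses period 8; if that slot is full,
		 * the loop retries with period 4, 2, and so on. */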
1092		if (exponent < 0)
1093			return -EINVAL;
1094
1095		/* If the slot is full, try a lower period */
1096		do {
1097			qh->period = 1 << exponent;
1098			qh->skel = SKEL_INDEX(exponent);
1099
1100			/* For now, interrupt phase is fixed by the layout
1101			 * of the QH lists.
1102			 */
1103			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
1104			ret = uhci_check_bandwidth(uhci, qh);
1105		} while (ret != 0 && --exponent >= 0);
1106		if (ret)
1107			return ret;
1108	} else if (qh->period > urb->interval)
1109		return -EINVAL;		/* Can't decrease the period */
1110
1111	ret = uhci_submit_common(uhci, urb, qh);
1112	if (ret == 0) {
1113		urb->interval = qh->period;
1114		if (!qh->bandwidth_reserved)
1115			uhci_reserve_bandwidth(uhci, qh);
1116	}
1117	return ret;
1118}
1119
1120/*
1121 * Fix up the data structures following a short transfer
1122 */
1123static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
1124		struct uhci_qh *qh, struct urb_priv *urbp)
1125{
1126	struct uhci_td *td;
1127	struct list_head *tmp;
1128	int ret;
1129
1130	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
1131	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1132
1133		/* When a control transfer is short, we have to restart
1134		 * the queue at the status stage transaction, which is
1135		 * the last TD. */
1136		WARN_ON(list_empty(&urbp->td_list));
1137		qh->element = LINK_TO_TD(uhci, td);
1138		tmp = td->list.prev;
1139		ret = -EINPROGRESS;
1140
1141	} else {
1142
1143		/* When a bulk/interrupt transfer is short, we have to
1144		 * fix up the toggles of the following URBs on the queue
1145		 * before restarting the queue at the next URB. */
1146		qh->initial_toggle =
1147			uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
1148		uhci_fixup_toggles(uhci, qh, 1);
1149
1150		if (list_empty(&urbp->td_list))
1151			td = qh->post_td;
1152		qh->element = td->link;
1153		tmp = urbp->td_list.prev;
1154		ret = 0;
1155	}
1156
1157	/* Remove all the TDs we skipped over, from tmp back to the start */
1158	while (tmp != &urbp->td_list) {
1159		td = list_entry(tmp, struct uhci_td, list);
1160		tmp = tmp->prev;
1161
1162		uhci_remove_td_from_urbp(td);
1163		uhci_free_td(uhci, td);
1164	}
1165	return ret;
1166}
1167
1168/*
1169 * Common result for control, bulk, and interrupt
1170 */
1171static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1172{
1173	struct urb_priv *urbp = urb->hcpriv;
1174	struct uhci_qh *qh = urbp->qh;
1175	struct uhci_td *td, *tmp;
1176	unsigned status;
1177	int ret = 0;
1178
1179	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
1180		unsigned int ctrlstat;
1181		int len;
1182
1183		ctrlstat = td_status(uhci, td);
1184		status = uhci_status_bits(ctrlstat);
1185		if (status & TD_CTRL_ACTIVE)
1186			return -EINPROGRESS;
1187
1188		len = uhci_actual_length(ctrlstat);
1189		urb->actual_length += len;
1190
1191		if (status) {
1192			ret = uhci_map_status(status,
1193					uhci_packetout(td_token(uhci, td)));
1194			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1195				/* Some debugging code */
1196				dev_dbg(&urb->dev->dev,
1197						"%s: failed with status %x\n",
1198						__func__, status);
1199
1200				if (debug > 1 && errbuf) {
1201					/* Print the chain for debugging */
1202					uhci_show_qh(uhci, urbp->qh, errbuf,
1203							ERRBUF_LEN, 0);
1204					lprintk(errbuf);
1205				}
1206			}
1207
1208		/* Did we receive a short packet? */
1209		} else if (len < uhci_expected_length(td_token(uhci, td))) {
1210
1211			/* For control transfers, go to the status TD if
1212			 * this isn't already the last data TD */
1213			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1214				if (td->list.next != urbp->td_list.prev)
1215					ret = 1;
1216			}
1217
1218			/* For bulk and interrupt, this may be an error */
1219			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
1220				ret = -EREMOTEIO;
1221
1222			/* Fixup needed only if this isn't the URB's last TD */
1223			else if (&td->list != urbp->td_list.prev)
1224				ret = 1;
1225		}
1226
1227		uhci_remove_td_from_urbp(td);
1228		if (qh->post_td)
1229			uhci_free_td(uhci, qh->post_td);
1230		qh->post_td = td;
1231
1232		if (ret != 0)
1233			goto err;
1234	}
1235	return ret;
1236
1237err:
1238	if (ret < 0) {
1239		/* Note that the queue has stopped and save
1240		 * the next toggle value */
1241		qh->element = UHCI_PTR_TERM(uhci);
1242		qh->is_stopped = 1;
1243		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
1244		qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
1245				(ret == -EREMOTEIO);
1246
1247	} else		/* Short packet received */
1248		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
1249	return ret;
1250}
1251
1252/*
1253 * Isochronous transfers
1254 */
1255static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1256		struct uhci_qh *qh)
1257{
1258	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
 1259	int i, frame;
1260	unsigned long destination, status;
1261	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
1262
1263	/* Values must not be too big (could overflow below) */
1264	if (urb->interval >= UHCI_NUMFRAMES ||
1265			urb->number_of_packets >= UHCI_NUMFRAMES)
1266		return -EFBIG;
 1267
1268	/* Check the period and figure out the starting frame number */
1269	if (!qh->bandwidth_reserved) {
1270		qh->period = urb->interval;
1271		if (urb->transfer_flags & URB_ISO_ASAP) {
1272			qh->phase = -1;		/* Find the best phase */
1273			i = uhci_check_bandwidth(uhci, qh);
1274			if (i)
1275				return i;
1276
1277			/* Allow a little time to allocate the TDs */
1278			uhci_get_current_frame_number(uhci);
1279			frame = uhci->frame_number + 10;
1280
1281			/* Move forward to the first frame having the
1282			 * correct phase */
1283			urb->start_frame = frame + ((qh->phase - frame) &
1284					(qh->period - 1));
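			/* Example with made-up numbers: frame = 1003,
			 * phase = 2, period = 8 gives ((2 - 1003) & 7) = 7,
			 * so start_frame = 1010, the next frame at or after
			 * 1003 whose number matches the chosen phase. */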
1285		} else {
1286			i = urb->start_frame - uhci->last_iso_frame;
1287			if (i <= 0 || i >= UHCI_NUMFRAMES)
1288				return -EINVAL;
1289			qh->phase = urb->start_frame & (qh->period - 1);
1290			i = uhci_check_bandwidth(uhci, qh);
1291			if (i)
1292				return i;
1293		}
1294
1295	} else if (qh->period != urb->interval) {
1296		return -EINVAL;		/* Can't change the period */
1297
 1298	} else {
1299		/* Find the next unused frame */
1300		if (list_empty(&qh->queue)) {
1301			frame = qh->iso_frame;
1302		} else {
1303			struct urb *lurb;
1304
1305			lurb = list_entry(qh->queue.prev,
1306					struct urb_priv, node)->urb;
1307			frame = lurb->start_frame +
1308					lurb->number_of_packets *
1309					lurb->interval;
1310		}
1311		if (urb->transfer_flags & URB_ISO_ASAP) {
 1312			/* Skip some frames if necessary to ensure
 1313			 * the start frame is in the future.
1314			 */
1315			uhci_get_current_frame_number(uhci);
1316			if (uhci_frame_before_eq(frame, uhci->frame_number)) {
1317				frame = uhci->frame_number + 1;
1318				frame += ((qh->phase - frame) &
1319					(qh->period - 1));
1320			}
1321		}	/* Otherwise pick up where the last URB leaves off */
 1322		urb->start_frame = frame;
1323	}
1324
1325	/* Make sure we won't have to go too far into the future */
1326	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
1327			urb->start_frame + urb->number_of_packets *
1328				urb->interval))
 1329		return -EFBIG;
1330
1331	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1332	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1333
1334	for (i = 0; i < urb->number_of_packets; i++) {
1335		td = uhci_alloc_td(uhci);
1336		if (!td)
1337			return -ENOMEM;
1338
1339		uhci_add_td_to_urbp(td, urbp);
1340		uhci_fill_td(uhci, td, status, destination |
1341				uhci_explen(urb->iso_frame_desc[i].length),
1342				urb->transfer_dma +
1343					urb->iso_frame_desc[i].offset);
1344	}
1345
1346	/* Set the interrupt-on-completion flag on the last packet. */
1347	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
1348
1349	/* Add the TDs to the frame list */
1350	frame = urb->start_frame;
1351	list_for_each_entry(td, &urbp->td_list, list) {
1352		uhci_insert_td_in_frame_list(uhci, td, frame);
1353		frame += qh->period;
1354	}
1355
1356	if (list_empty(&qh->queue)) {
1357		qh->iso_packet_desc = &urb->iso_frame_desc[0];
1358		qh->iso_frame = urb->start_frame;
1359	}
1360
1361	qh->skel = SKEL_ISO;
1362	if (!qh->bandwidth_reserved)
1363		uhci_reserve_bandwidth(uhci, qh);
1364	return 0;
1365}
1366
1367static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1368{
1369	struct uhci_td *td, *tmp;
1370	struct urb_priv *urbp = urb->hcpriv;
1371	struct uhci_qh *qh = urbp->qh;
1372
1373	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
1374		unsigned int ctrlstat;
1375		int status;
1376		int actlength;
1377
1378		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
1379			return -EINPROGRESS;
1380
1381		uhci_remove_tds_from_frame(uhci, qh->iso_frame);
1382
1383		ctrlstat = td_status(uhci, td);
1384		if (ctrlstat & TD_CTRL_ACTIVE) {
1385			status = -EXDEV;	/* TD was added too late? */
1386		} else {
1387			status = uhci_map_status(uhci_status_bits(ctrlstat),
1388					usb_pipeout(urb->pipe));
1389			actlength = uhci_actual_length(ctrlstat);
1390
1391			urb->actual_length += actlength;
1392			qh->iso_packet_desc->actual_length = actlength;
1393			qh->iso_packet_desc->status = status;
1394		}
1395		if (status)
1396			urb->error_count++;
1397
1398		uhci_remove_td_from_urbp(td);
1399		uhci_free_td(uhci, td);
1400		qh->iso_frame += qh->period;
1401		++qh->iso_packet_desc;
1402	}
1403	return 0;
1404}
1405
1406static int uhci_urb_enqueue(struct usb_hcd *hcd,
1407		struct urb *urb, gfp_t mem_flags)
1408{
1409	int ret;
1410	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1411	unsigned long flags;
1412	struct urb_priv *urbp;
1413	struct uhci_qh *qh;
1414
1415	spin_lock_irqsave(&uhci->lock, flags);
1416
1417	ret = usb_hcd_link_urb_to_ep(hcd, urb);
1418	if (ret)
1419		goto done_not_linked;
1420
1421	ret = -ENOMEM;
1422	urbp = uhci_alloc_urb_priv(uhci, urb);
1423	if (!urbp)
1424		goto done;
1425
1426	if (urb->ep->hcpriv)
1427		qh = urb->ep->hcpriv;
1428	else {
1429		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
1430		if (!qh)
1431			goto err_no_qh;
1432	}
1433	urbp->qh = qh;
1434
1435	switch (qh->type) {
1436	case USB_ENDPOINT_XFER_CONTROL:
1437		ret = uhci_submit_control(uhci, urb, qh);
1438		break;
1439	case USB_ENDPOINT_XFER_BULK:
1440		ret = uhci_submit_bulk(uhci, urb, qh);
1441		break;
1442	case USB_ENDPOINT_XFER_INT:
1443		ret = uhci_submit_interrupt(uhci, urb, qh);
1444		break;
1445	case USB_ENDPOINT_XFER_ISOC:
1446		urb->error_count = 0;
1447		ret = uhci_submit_isochronous(uhci, urb, qh);
1448		break;
1449	}
1450	if (ret != 0)
1451		goto err_submit_failed;
1452
1453	/* Add this URB to the QH */
1454	list_add_tail(&urbp->node, &qh->queue);
1455
1456	/* If the new URB is the first and only one on this QH then either
1457	 * the QH is new and idle or else it's unlinked and waiting to
1458	 * become idle, so we can activate it right away.  But only if the
1459	 * queue isn't stopped. */
1460	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
1461		uhci_activate_qh(uhci, qh);
1462		uhci_urbp_wants_fsbr(uhci, urbp);
1463	}
1464	goto done;
1465
1466err_submit_failed:
1467	if (qh->state == QH_STATE_IDLE)
1468		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
1469err_no_qh:
1470	uhci_free_urb_priv(uhci, urbp);
1471done:
1472	if (ret)
1473		usb_hcd_unlink_urb_from_ep(hcd, urb);
1474done_not_linked:
1475	spin_unlock_irqrestore(&uhci->lock, flags);
1476	return ret;
1477}
1478
1479static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1480{
1481	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1482	unsigned long flags;
1483	struct uhci_qh *qh;
1484	int rc;
1485
1486	spin_lock_irqsave(&uhci->lock, flags);
1487	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1488	if (rc)
1489		goto done;
1490
1491	qh = ((struct urb_priv *) urb->hcpriv)->qh;
1492
1493	/* Remove Isochronous TDs from the frame list ASAP */
1494	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
1495		uhci_unlink_isochronous_tds(uhci, urb);
1496		mb();
1497
1498		/* If the URB has already started, update the QH unlink time */
1499		uhci_get_current_frame_number(uhci);
1500		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
1501			qh->unlink_frame = uhci->frame_number;
1502	}
1503
1504	uhci_unlink_qh(uhci, qh);
1505
1506done:
1507	spin_unlock_irqrestore(&uhci->lock, flags);
1508	return rc;
1509}
1510
1511/*
1512 * Finish unlinking an URB and give it back
1513 */
1514static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
1515		struct urb *urb, int status)
1516__releases(uhci->lock)
1517__acquires(uhci->lock)
1518{
1519	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
1520
1521	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1522
1523		/* Subtract off the length of the SETUP packet from
1524		 * urb->actual_length.
1525		 */
1526		urb->actual_length -= min_t(u32, 8, urb->actual_length);
1527	}
1528
1529	/* When giving back the first URB in an Isochronous queue,
1530	 * reinitialize the QH's iso-related members for the next URB. */
1531	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
1532			urbp->node.prev == &qh->queue &&
1533			urbp->node.next != &qh->queue) {
1534		struct urb *nurb = list_entry(urbp->node.next,
1535				struct urb_priv, node)->urb;
1536
1537		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
1538		qh->iso_frame = nurb->start_frame;
1539	}
1540
1541	/* Take the URB off the QH's queue.  If the queue is now empty,
1542	 * this is a perfect time for a toggle fixup. */
1543	list_del_init(&urbp->node);
1544	if (list_empty(&qh->queue) && qh->needs_fixup) {
1545		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1546				usb_pipeout(urb->pipe), qh->initial_toggle);
1547		qh->needs_fixup = 0;
1548	}
1549
1550	uhci_free_urb_priv(uhci, urbp);
1551	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
1552
1553	spin_unlock(&uhci->lock);
1554	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
1555	spin_lock(&uhci->lock);
1556
1557	/* If the queue is now empty, we can unlink the QH and give up its
1558	 * reserved bandwidth. */
1559	if (list_empty(&qh->queue)) {
1560		uhci_unlink_qh(uhci, qh);
1561		if (qh->bandwidth_reserved)
1562			uhci_release_bandwidth(uhci, qh);
1563	}
1564}
1565
1566/*
1567 * Scan the URBs in a QH's queue
1568 */
1569#define QH_FINISHED_UNLINKING(qh)			\
1570		(qh->state == QH_STATE_UNLINKING &&	\
1571		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
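/*
 * Illustrative note: qh->unlink_frame is recorded by uhci_unlink_qh(), so
 * while the controller is running (is_stopped == 0) the macro reports
 * "finished" only once at least one frame boundary has passed since the
 * unlink.  When the controller is stopped, the non-zero is_stopped offset
 * makes the comparison differ right away, letting unlinked QHs be reclaimed
 * immediately.
 */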
1572
1573static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
1574{
1575	struct urb_priv *urbp;
1576	struct urb *urb;
1577	int status;
1578
1579	while (!list_empty(&qh->queue)) {
1580		urbp = list_entry(qh->queue.next, struct urb_priv, node);
1581		urb = urbp->urb;
1582
1583		if (qh->type == USB_ENDPOINT_XFER_ISOC)
1584			status = uhci_result_isochronous(uhci, urb);
1585		else
1586			status = uhci_result_common(uhci, urb);
1587		if (status == -EINPROGRESS)
1588			break;
1589
1590		/* Dequeued but completed URBs can't be given back unless
1591		 * the QH is stopped or has finished unlinking. */
1592		if (urb->unlinked) {
1593			if (QH_FINISHED_UNLINKING(qh))
1594				qh->is_stopped = 1;
1595			else if (!qh->is_stopped)
1596				return;
1597		}
1598
1599		uhci_giveback_urb(uhci, qh, urb, status);
1600		if (status < 0)
1601			break;
1602	}
1603
1604	/* If the QH is neither stopped nor finished unlinking (normal case),
1605	 * our work here is done. */
1606	if (QH_FINISHED_UNLINKING(qh))
1607		qh->is_stopped = 1;
1608	else if (!qh->is_stopped)
1609		return;
1610
1611	/* Otherwise give back each of the dequeued URBs */
1612restart:
1613	list_for_each_entry(urbp, &qh->queue, node) {
1614		urb = urbp->urb;
1615		if (urb->unlinked) {
1616
1617			/* Fix up the TD links and save the toggles for
1618			 * non-Isochronous queues.  For Isochronous queues,
1619			 * test for too-recent dequeues. */
1620			if (!uhci_cleanup_queue(uhci, qh, urb)) {
1621				qh->is_stopped = 0;
1622				return;
1623			}
1624			uhci_giveback_urb(uhci, qh, urb, 0);
1625			goto restart;
1626		}
1627	}
1628	qh->is_stopped = 0;
1629
1630	/* There are no more dequeued URBs.  If there are still URBs on the
1631	 * queue, the QH can now be re-activated. */
1632	if (!list_empty(&qh->queue)) {
1633		if (qh->needs_fixup)
1634			uhci_fixup_toggles(uhci, qh, 0);
1635
1636		/* If the first URB on the queue wants FSBR but its time
1637		 * limit has expired, set the next TD to interrupt on
1638		 * completion before reactivating the QH. */
1639		urbp = list_entry(qh->queue.next, struct urb_priv, node);
1640		if (urbp->fsbr && qh->wait_expired) {
1641			struct uhci_td *td = list_entry(urbp->td_list.next,
1642					struct uhci_td, list);
1643
1644			td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
1645		}
1646
1647		uhci_activate_qh(uhci, qh);
1648	}
1649
1650	/* The queue is empty.  The QH can become idle if it is fully
1651	 * unlinked. */
1652	else if (QH_FINISHED_UNLINKING(qh))
1653		uhci_make_qh_idle(uhci, qh);
1654}
1655
1656/*
1657 * Check for queues that have made some forward progress.
1658 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
1659 * has not advanced since last examined; 1 otherwise.
1660 *
1661 * Early Intel controllers have a bug which causes qh->element sometimes
1662 * not to advance when a TD completes successfully.  The queue remains
1663 * stuck on the inactive completed TD.  We detect such cases and advance
1664 * the element pointer by hand.
1665 */
1666static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
1667{
1668	struct urb_priv *urbp = NULL;
1669	struct uhci_td *td;
1670	int ret = 1;
1671	unsigned status;
1672
1673	if (qh->type == USB_ENDPOINT_XFER_ISOC)
1674		goto done;
1675
1676	/* Treat an UNLINKING queue as though it hasn't advanced.
1677	 * This is okay because reactivation will treat it as though
1678	 * it has advanced, and if it is going to become IDLE then
1679	 * this doesn't matter anyway.  Furthermore it's possible
1680	 * for an UNLINKING queue not to have any URBs at all, or
1681	 * for its first URB not to have any TDs (if it was dequeued
1682	 * just as it completed).  So it's not easy in any case to
1683	 * test whether such queues have advanced. */
1684	if (qh->state != QH_STATE_ACTIVE) {
1685		urbp = NULL;
1686		status = 0;
1687
1688	} else {
1689		urbp = list_entry(qh->queue.next, struct urb_priv, node);
1690		td = list_entry(urbp->td_list.next, struct uhci_td, list);
1691		status = td_status(uhci, td);
1692		if (!(status & TD_CTRL_ACTIVE)) {
1693
1694			/* We're okay, the queue has advanced */
1695			qh->wait_expired = 0;
1696			qh->advance_jiffies = jiffies;
1697			goto done;
1698		}
1699		ret = uhci->is_stopped;
1700	}
1701
1702	/* The queue hasn't advanced; check for timeout */
1703	if (qh->wait_expired)
1704		goto done;
1705
1706	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
1707
1708		/* Detect the Intel bug and work around it */
1709		if (qh->post_td && qh_element(qh) ==
1710			LINK_TO_TD(uhci, qh->post_td)) {
1711			qh->element = qh->post_td->link;
1712			qh->advance_jiffies = jiffies;
1713			ret = 1;
1714			goto done;
1715		}
1716
1717		qh->wait_expired = 1;
1718
1719		/* If the current URB wants FSBR, unlink it temporarily
1720		 * so that we can safely set the next TD to interrupt on
1721		 * completion.  That way we'll know as soon as the queue
1722		 * starts moving again. */
1723		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
1724			uhci_unlink_qh(uhci, qh);
1725
1726	} else {
1727		/* Unmoving but not-yet-expired queues keep FSBR alive */
1728		if (urbp)
1729			uhci_urbp_wants_fsbr(uhci, urbp);
1730	}
1731
1732done:
1733	return ret;
1734}
1735
1736/*
1737 * Process events in the schedule, but only in one thread at a time
1738 */
1739static void uhci_scan_schedule(struct uhci_hcd *uhci)
1740{
1741	int i;
1742	struct uhci_qh *qh;
1743
1744	/* Don't allow re-entrant calls */
1745	if (uhci->scan_in_progress) {
1746		uhci->need_rescan = 1;
1747		return;
1748	}
1749	uhci->scan_in_progress = 1;
1750rescan:
1751	uhci->need_rescan = 0;
1752	uhci->fsbr_is_wanted = 0;
1753
1754	uhci_clear_next_interrupt(uhci);
1755	uhci_get_current_frame_number(uhci);
1756	uhci->cur_iso_frame = uhci->frame_number;
1757
1758	/* Go through all the QH queues and process the URBs in each one */
1759	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
1760		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
1761				struct uhci_qh, node);
1762		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
1763			uhci->next_qh = list_entry(qh->node.next,
1764					struct uhci_qh, node);
1765
1766			if (uhci_advance_check(uhci, qh)) {
1767				uhci_scan_qh(uhci, qh);
1768				if (qh->state == QH_STATE_ACTIVE) {
1769					uhci_urbp_wants_fsbr(uhci,
1770	list_entry(qh->queue.next, struct urb_priv, node));
1771				}
1772			}
1773		}
1774	}
1775
1776	uhci->last_iso_frame = uhci->cur_iso_frame;
1777	if (uhci->need_rescan)
1778		goto rescan;
1779	uhci->scan_in_progress = 0;
1780
1781	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
1782			!uhci->fsbr_expiring) {
1783		uhci->fsbr_expiring = 1;
1784		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
1785	}
1786
1787	if (list_empty(&uhci->skel_unlink_qh->node))
1788		uhci_clear_next_interrupt(uhci);
1789	else
1790		uhci_set_next_interrupt(uhci);
1791}
v3.15
   1/*
   2 * Universal Host Controller Interface driver for USB.
   3 *
   4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
   5 *
   6 * (C) Copyright 1999 Linus Torvalds
   7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
   8 * (C) Copyright 1999 Randy Dunlap
   9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
  10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
  11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
  12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
  13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
  14 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
  15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
  16 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
  17 */
  18
  19
  20/*
  21 * Technically, updating td->status here is a race, but it's not really a
  22 * problem. The worst that can happen is that we set the IOC bit again
  23 * generating a spurious interrupt. We could fix this by creating another
  24 * QH and leaving the IOC bit always set, but then we would have to play
  25 * games with the FSBR code to make sure we get the correct order in all
  26 * the cases. I don't think it's worth the effort
  27 */
  28static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
  29{
  30	if (uhci->is_stopped)
  31		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
  32	uhci->term_td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
  33}
  34
  35static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
  36{
  37	uhci->term_td->status &= ~cpu_to_hc32(uhci, TD_CTRL_IOC);
  38}
  39
  40
  41/*
  42 * Full-Speed Bandwidth Reclamation (FSBR).
  43 * We turn on FSBR whenever a queue that wants it is advancing,
  44 * and leave it on for a short time thereafter.
  45 */
  46static void uhci_fsbr_on(struct uhci_hcd *uhci)
  47{
  48	struct uhci_qh *lqh;
  49
  50	/* The terminating skeleton QH always points back to the first
  51	 * FSBR QH.  Make the last async QH point to the terminating
  52	 * skeleton QH. */
  53	uhci->fsbr_is_on = 1;
  54	lqh = list_entry(uhci->skel_async_qh->node.prev,
  55			struct uhci_qh, node);
  56	lqh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);
  57}
  58
  59static void uhci_fsbr_off(struct uhci_hcd *uhci)
  60{
  61	struct uhci_qh *lqh;
  62
  63	/* Remove the link from the last async QH to the terminating
  64	 * skeleton QH. */
  65	uhci->fsbr_is_on = 0;
  66	lqh = list_entry(uhci->skel_async_qh->node.prev,
  67			struct uhci_qh, node);
  68	lqh->link = UHCI_PTR_TERM(uhci);
  69}
  70
  71static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
  72{
  73	struct urb_priv *urbp = urb->hcpriv;
  74
  75	if (!(urb->transfer_flags & URB_NO_FSBR))
  76		urbp->fsbr = 1;
  77}
  78
  79static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
  80{
  81	if (urbp->fsbr) {
  82		uhci->fsbr_is_wanted = 1;
  83		if (!uhci->fsbr_is_on)
  84			uhci_fsbr_on(uhci);
  85		else if (uhci->fsbr_expiring) {
  86			uhci->fsbr_expiring = 0;
  87			del_timer(&uhci->fsbr_timer);
  88		}
  89	}
  90}
  91
  92static void uhci_fsbr_timeout(unsigned long _uhci)
  93{
  94	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
  95	unsigned long flags;
  96
  97	spin_lock_irqsave(&uhci->lock, flags);
  98	if (uhci->fsbr_expiring) {
  99		uhci->fsbr_expiring = 0;
 100		uhci_fsbr_off(uhci);
 101	}
 102	spin_unlock_irqrestore(&uhci->lock, flags);
 103}
 104
 105
 106static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
 107{
 108	dma_addr_t dma_handle;
 109	struct uhci_td *td;
 110
 111	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
 112	if (!td)
 113		return NULL;
 114
 115	td->dma_handle = dma_handle;
 116	td->frame = -1;
 117
 118	INIT_LIST_HEAD(&td->list);
 119	INIT_LIST_HEAD(&td->fl_list);
 120
 121	return td;
 122}
 123
 124static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
 125{
 126	if (!list_empty(&td->list))
 127		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
 128	if (!list_empty(&td->fl_list))
 129		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
 130
 131	dma_pool_free(uhci->td_pool, td, td->dma_handle);
 132}
 133
 134static inline void uhci_fill_td(struct uhci_hcd *uhci, struct uhci_td *td,
 135		u32 status, u32 token, u32 buffer)
 136{
 137	td->status = cpu_to_hc32(uhci, status);
 138	td->token = cpu_to_hc32(uhci, token);
 139	td->buffer = cpu_to_hc32(uhci, buffer);
 140}
 141
 142static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
 143{
 144	list_add_tail(&td->list, &urbp->td_list);
 145}
 146
 147static void uhci_remove_td_from_urbp(struct uhci_td *td)
 148{
 149	list_del_init(&td->list);
 150}
 151
 152/*
 153 * We insert Isochronous URBs directly into the frame list at the beginning
 154 */
 155static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
 156		struct uhci_td *td, unsigned framenum)
 157{
 158	framenum &= (UHCI_NUMFRAMES - 1);
 159
 160	td->frame = framenum;
 161
 162	/* Is there a TD already mapped there? */
 163	if (uhci->frame_cpu[framenum]) {
 164		struct uhci_td *ftd, *ltd;
 165
 166		ftd = uhci->frame_cpu[framenum];
 167		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
 168
 169		list_add_tail(&td->fl_list, &ftd->fl_list);
 170
 171		td->link = ltd->link;
 172		wmb();
 173		ltd->link = LINK_TO_TD(uhci, td);
 174	} else {
 175		td->link = uhci->frame[framenum];
 176		wmb();
 177		uhci->frame[framenum] = LINK_TO_TD(uhci, td);
 178		uhci->frame_cpu[framenum] = td;
 179	}
 180}
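/*
 * In both branches of uhci_insert_td_in_frame_list() above, the new TD's
 * own link pointer is filled in before the wmb(), and only afterward is
 * the previous TD (or the frame list slot itself) made to point at the
 * new TD.  That ordering keeps the controller from ever following a link
 * into a TD whose link field has not been written yet.
 */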
 181
 182static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
 183		struct uhci_td *td)
 184{
 185	/* If it's not inserted, don't remove it */
 186	if (td->frame == -1) {
 187		WARN_ON(!list_empty(&td->fl_list));
 188		return;
 189	}
 190
 191	if (uhci->frame_cpu[td->frame] == td) {
 192		if (list_empty(&td->fl_list)) {
 193			uhci->frame[td->frame] = td->link;
 194			uhci->frame_cpu[td->frame] = NULL;
 195		} else {
 196			struct uhci_td *ntd;
 197
 198			ntd = list_entry(td->fl_list.next,
 199					 struct uhci_td,
 200					 fl_list);
 201			uhci->frame[td->frame] = LINK_TO_TD(uhci, ntd);
 202			uhci->frame_cpu[td->frame] = ntd;
 203		}
 204	} else {
 205		struct uhci_td *ptd;
 206
 207		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
 208		ptd->link = td->link;
 209	}
 210
 211	list_del_init(&td->fl_list);
 212	td->frame = -1;
 213}
 214
 215static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
 216		unsigned int framenum)
 217{
 218	struct uhci_td *ftd, *ltd;
 219
 220	framenum &= (UHCI_NUMFRAMES - 1);
 221
 222	ftd = uhci->frame_cpu[framenum];
 223	if (ftd) {
 224		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
 225		uhci->frame[framenum] = ltd->link;
 226		uhci->frame_cpu[framenum] = NULL;
 227
 228		while (!list_empty(&ftd->fl_list))
 229			list_del_init(ftd->fl_list.prev);
 230	}
 231}
 232
 233/*
 234 * Remove all the TDs for an Isochronous URB from the frame list
 235 */
 236static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
 237{
 238	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
 239	struct uhci_td *td;
 240
 241	list_for_each_entry(td, &urbp->td_list, list)
 242		uhci_remove_td_from_frame_list(uhci, td);
 243}
 244
 245static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 246		struct usb_device *udev, struct usb_host_endpoint *hep)
 247{
 248	dma_addr_t dma_handle;
 249	struct uhci_qh *qh;
 250
 251	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
 252	if (!qh)
 253		return NULL;
 254
 255	memset(qh, 0, sizeof(*qh));
 256	qh->dma_handle = dma_handle;
 257
 258	qh->element = UHCI_PTR_TERM(uhci);
 259	qh->link = UHCI_PTR_TERM(uhci);
 260
 261	INIT_LIST_HEAD(&qh->queue);
 262	INIT_LIST_HEAD(&qh->node);
 263
 264	if (udev) {		/* Normal QH */
 265		qh->type = usb_endpoint_type(&hep->desc);
 266		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
 267			qh->dummy_td = uhci_alloc_td(uhci);
 268			if (!qh->dummy_td) {
 269				dma_pool_free(uhci->qh_pool, qh, dma_handle);
 270				return NULL;
 271			}
 272		}
 273		qh->state = QH_STATE_IDLE;
 274		qh->hep = hep;
 275		qh->udev = udev;
 276		hep->hcpriv = qh;
 277
 278		if (qh->type == USB_ENDPOINT_XFER_INT ||
 279				qh->type == USB_ENDPOINT_XFER_ISOC)
 280			qh->load = usb_calc_bus_time(udev->speed,
 281					usb_endpoint_dir_in(&hep->desc),
 282					qh->type == USB_ENDPOINT_XFER_ISOC,
 283					usb_endpoint_maxp(&hep->desc))
 284				/ 1000 + 1;
 285
 286	} else {		/* Skeleton QH */
 287		qh->state = QH_STATE_ACTIVE;
 288		qh->type = -1;
 289	}
 290	return qh;
 291}
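/*
 * usb_calc_bus_time() reports the worst-case transaction time in
 * nanoseconds, so the "/ 1000 + 1" above stores qh->load as a slightly
 * pessimistic number of microseconds per frame -- the same unit used by
 * the 900 us budget check in uhci_check_bandwidth() below.
 */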
 292
 293static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 294{
 295	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
 296	if (!list_empty(&qh->queue))
 297		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
 298
 299	list_del(&qh->node);
 300	if (qh->udev) {
 301		qh->hep->hcpriv = NULL;
 302		if (qh->dummy_td)
 303			uhci_free_td(uhci, qh->dummy_td);
 304	}
 305	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
 306}
 307
 308/*
 309 * When a queue is stopped and a dequeued URB is given back, adjust
 310 * the previous TD link (if the URB isn't first on the queue) or
 311 * save its toggle value (if it is first and is currently executing).
 312 *
 313 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 314 */
 315static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
 316		struct urb *urb)
 317{
 318	struct urb_priv *urbp = urb->hcpriv;
 319	struct uhci_td *td;
 320	int ret = 1;
 321
 322	/* Isochronous pipes don't use toggles and their TD link pointers
 323	 * get adjusted during uhci_urb_dequeue().  But since their queues
 324	 * cannot truly be stopped, we have to watch out for dequeues
 325	 * occurring after the nominal unlink frame. */
 326	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
 327		ret = (uhci->frame_number + uhci->is_stopped !=
 328				qh->unlink_frame);
 329		goto done;
 330	}
 331
 332	/* If the URB isn't first on its queue, adjust the link pointer
 333	 * of the last TD in the previous URB.  The toggle doesn't need
 334	 * to be saved since this URB can't be executing yet. */
 335	if (qh->queue.next != &urbp->node) {
 336		struct urb_priv *purbp;
 337		struct uhci_td *ptd;
 338
 339		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
 340		WARN_ON(list_empty(&purbp->td_list));
 341		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
 342				list);
 343		td = list_entry(urbp->td_list.prev, struct uhci_td,
 344				list);
 345		ptd->link = td->link;
 346		goto done;
 347	}
 348
  349	/* If the QH element pointer is UHCI_PTR_TERM then the currently
  350	 * executing URB has already been unlinked, so this one isn't it. */
 351	if (qh_element(qh) == UHCI_PTR_TERM(uhci))
 352		goto done;
 353	qh->element = UHCI_PTR_TERM(uhci);
 354
 355	/* Control pipes don't have to worry about toggles */
 356	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
 357		goto done;
 358
 359	/* Save the next toggle value */
 360	WARN_ON(list_empty(&urbp->td_list));
 361	td = list_entry(urbp->td_list.next, struct uhci_td, list);
 362	qh->needs_fixup = 1;
 363	qh->initial_toggle = uhci_toggle(td_token(uhci, td));
 364
 365done:
 366	return ret;
 367}
 368
 369/*
 370 * Fix up the data toggles for URBs in a queue, when one of them
 371 * terminates early (short transfer, error, or dequeued).
 372 */
 373static void uhci_fixup_toggles(struct uhci_hcd *uhci, struct uhci_qh *qh,
 374			int skip_first)
 375{
 376	struct urb_priv *urbp = NULL;
 377	struct uhci_td *td;
 378	unsigned int toggle = qh->initial_toggle;
 379	unsigned int pipe;
 380
 381	/* Fixups for a short transfer start with the second URB in the
 382	 * queue (the short URB is the first). */
 383	if (skip_first)
 384		urbp = list_entry(qh->queue.next, struct urb_priv, node);
 385
 386	/* When starting with the first URB, if the QH element pointer is
 387	 * still valid then we know the URB's toggles are okay. */
 388	else if (qh_element(qh) != UHCI_PTR_TERM(uhci))
 389		toggle = 2;
 390
 391	/* Fix up the toggle for the URBs in the queue.  Normally this
 392	 * loop won't run more than once: When an error or short transfer
 393	 * occurs, the queue usually gets emptied. */
 394	urbp = list_prepare_entry(urbp, &qh->queue, node);
 395	list_for_each_entry_continue(urbp, &qh->queue, node) {
 396
 397		/* If the first TD has the right toggle value, we don't
 398		 * need to change any toggles in this URB */
 399		td = list_entry(urbp->td_list.next, struct uhci_td, list);
 400		if (toggle > 1 || uhci_toggle(td_token(uhci, td)) == toggle) {
 401			td = list_entry(urbp->td_list.prev, struct uhci_td,
 402					list);
 403			toggle = uhci_toggle(td_token(uhci, td)) ^ 1;
 404
 405		/* Otherwise all the toggles in the URB have to be switched */
 406		} else {
 407			list_for_each_entry(td, &urbp->td_list, list) {
 408				td->token ^= cpu_to_hc32(uhci,
 409							TD_TOKEN_TOGGLE);
 410				toggle ^= 1;
 411			}
 412		}
 413	}
 414
 415	wmb();
 416	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
 417	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
 418			usb_pipeout(pipe), toggle);
 419	qh->needs_fixup = 0;
 420}
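/*
 * The toggle value 2 used above is a sentinel: it can never equal a real
 * toggle bit, so the first branch of the loop is taken, no TDs are
 * flipped, and the running toggle is simply re-read from the last TD of
 * each URB.  The usb_settoggle() call at the end records the value the
 * next submission on this endpoint should start from.
 */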
 421
 422/*
 423 * Link an Isochronous QH into its skeleton's list
 424 */
 425static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
 426{
 427	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);
 428
 429	/* Isochronous QHs aren't linked by the hardware */
 430}
 431
 432/*
 433 * Link a high-period interrupt QH into the schedule at the end of its
 434 * skeleton's list
 435 */
 436static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
 437{
 438	struct uhci_qh *pqh;
 439
 440	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);
 441
 442	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
 443	qh->link = pqh->link;
 444	wmb();
 445	pqh->link = LINK_TO_QH(uhci, qh);
 446}
 447
 448/*
 449 * Link a period-1 interrupt or async QH into the schedule at the
 450 * correct spot in the async skeleton's list, and update the FSBR link
 451 */
 452static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
 453{
 454	struct uhci_qh *pqh;
 455	__hc32 link_to_new_qh;
 456
 457	/* Find the predecessor QH for our new one and insert it in the list.
 458	 * The list of QHs is expected to be short, so linear search won't
 459	 * take too long. */
 460	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
 461		if (pqh->skel <= qh->skel)
 462			break;
 463	}
 464	list_add(&qh->node, &pqh->node);
 465
 466	/* Link it into the schedule */
 467	qh->link = pqh->link;
 468	wmb();
 469	link_to_new_qh = LINK_TO_QH(uhci, qh);
 470	pqh->link = link_to_new_qh;
 471
 472	/* If this is now the first FSBR QH, link the terminating skeleton
 473	 * QH to it. */
 474	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
 475		uhci->skel_term_qh->link = link_to_new_qh;
 476}
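/*
 * link_async() keeps the async skeleton's list sorted by ascending skel
 * value: the reverse search stops at the last QH whose skel is <= the new
 * one's, and the new QH goes right after it.  Because of that ordering,
 * the FSBR loop pointer in skel_term_qh only needs updating when the new
 * QH becomes the first QH at or above SKEL_FSBR.
 */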
 477
 478/*
 479 * Put a QH on the schedule in both hardware and software
 480 */
 481static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 482{
 483	WARN_ON(list_empty(&qh->queue));
 484
 485	/* Set the element pointer if it isn't set already.
 486	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
 487	if (qh_element(qh) == UHCI_PTR_TERM(uhci)) {
 488		struct urb_priv *urbp = list_entry(qh->queue.next,
 489				struct urb_priv, node);
 490		struct uhci_td *td = list_entry(urbp->td_list.next,
 491				struct uhci_td, list);
 492
 493		qh->element = LINK_TO_TD(uhci, td);
 494	}
 495
 496	/* Treat the queue as if it has just advanced */
 497	qh->wait_expired = 0;
 498	qh->advance_jiffies = jiffies;
 499
 500	if (qh->state == QH_STATE_ACTIVE)
 501		return;
 502	qh->state = QH_STATE_ACTIVE;
 503
 504	/* Move the QH from its old list to the correct spot in the appropriate
 505	 * skeleton's list */
 506	if (qh == uhci->next_qh)
 507		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 508				node);
 509	list_del(&qh->node);
 510
 511	if (qh->skel == SKEL_ISO)
 512		link_iso(uhci, qh);
 513	else if (qh->skel < SKEL_ASYNC)
 514		link_interrupt(uhci, qh);
 515	else
 516		link_async(uhci, qh);
 517}
 518
 519/*
 520 * Unlink a high-period interrupt QH from the schedule
 521 */
 522static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
 523{
 524	struct uhci_qh *pqh;
 525
 526	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
 527	pqh->link = qh->link;
 528	mb();
 529}
 530
 531/*
 532 * Unlink a period-1 interrupt or async QH from the schedule
 533 */
 534static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
 535{
 536	struct uhci_qh *pqh;
 537	__hc32 link_to_next_qh = qh->link;
 538
 539	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
 540	pqh->link = link_to_next_qh;
 541
 542	/* If this was the old first FSBR QH, link the terminating skeleton
 543	 * QH to the next (new first FSBR) QH. */
 544	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
 545		uhci->skel_term_qh->link = link_to_next_qh;
 546	mb();
 547}
 548
 549/*
 550 * Take a QH off the hardware schedule
 551 */
 552static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 553{
 554	if (qh->state == QH_STATE_UNLINKING)
 555		return;
 556	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
 557	qh->state = QH_STATE_UNLINKING;
 558
 559	/* Unlink the QH from the schedule and record when we did it */
 560	if (qh->skel == SKEL_ISO)
 561		;
 562	else if (qh->skel < SKEL_ASYNC)
 563		unlink_interrupt(uhci, qh);
 564	else
 565		unlink_async(uhci, qh);
 566
 567	uhci_get_current_frame_number(uhci);
 568	qh->unlink_frame = uhci->frame_number;
 569
 570	/* Force an interrupt so we know when the QH is fully unlinked */
 571	if (list_empty(&uhci->skel_unlink_qh->node) || uhci->is_stopped)
 572		uhci_set_next_interrupt(uhci);
 573
 574	/* Move the QH from its old list to the end of the unlinking list */
 575	if (qh == uhci->next_qh)
 576		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 577				node);
 578	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
 579}
 580
 581/*
 582 * When we and the controller are through with a QH, it becomes IDLE.
 583 * This happens when a QH has been off the schedule (on the unlinking
 584 * list) for more than one frame, or when an error occurs while adding
 585 * the first URB onto a new QH.
 586 */
 587static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
 588{
 589	WARN_ON(qh->state == QH_STATE_ACTIVE);
 590
 591	if (qh == uhci->next_qh)
 592		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
 593				node);
 594	list_move(&qh->node, &uhci->idle_qh_list);
 595	qh->state = QH_STATE_IDLE;
 596
 597	/* Now that the QH is idle, its post_td isn't being used */
 598	if (qh->post_td) {
 599		uhci_free_td(uhci, qh->post_td);
 600		qh->post_td = NULL;
 601	}
 602
 603	/* If anyone is waiting for a QH to become idle, wake them up */
 604	if (uhci->num_waiting)
 605		wake_up_all(&uhci->waitqh);
 606}
 607
 608/*
 609 * Find the highest existing bandwidth load for a given phase and period.
 610 */
 611static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
 612{
 613	int highest_load = uhci->load[phase];
 614
 615	for (phase += period; phase < MAX_PHASE; phase += period)
 616		highest_load = max_t(int, highest_load, uhci->load[phase]);
 617	return highest_load;
 618}
 619
 620/*
 621 * Set qh->phase to the optimal phase for a periodic transfer and
 622 * check whether the bandwidth requirement is acceptable.
 623 */
 624static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
 625{
 626	int minimax_load;
 627
 628	/* Find the optimal phase (unless it is already set) and get
 629	 * its load value. */
 630	if (qh->phase >= 0)
 631		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
 632	else {
 633		int phase, load;
 634		int max_phase = min_t(int, MAX_PHASE, qh->period);
 635
 636		qh->phase = 0;
 637		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
 638		for (phase = 1; phase < max_phase; ++phase) {
 639			load = uhci_highest_load(uhci, phase, qh->period);
 640			if (load < minimax_load) {
 641				minimax_load = load;
 642				qh->phase = phase;
 643			}
 644		}
 645	}
 646
 647	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
 648	if (minimax_load + qh->load > 900) {
 649		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
 650				"period %d, phase %d, %d + %d us\n",
 651				qh->period, qh->phase, minimax_load, qh->load);
 652		return -ENOSPC;
 653	}
 654	return 0;
 655}
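/*
 * For illustration: a period-4 endpoint needing 200 us per frame is put
 * in whichever of its candidate phases currently has the smallest
 * worst-case per-frame load.  If even that phase already carries more
 * than 700 us in some frame, the total would exceed the 900 us (90%)
 * budget and the request is refused with -ENOSPC.
 */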
 656
 657/*
 658 * Reserve a periodic QH's bandwidth in the schedule
 659 */
 660static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
 661{
 662	int i;
 663	int load = qh->load;
 664	char *p = "??";
 665
 666	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
 667		uhci->load[i] += load;
 668		uhci->total_load += load;
 669	}
 670	uhci_to_hcd(uhci)->self.bandwidth_allocated =
 671			uhci->total_load / MAX_PHASE;
 672	switch (qh->type) {
 673	case USB_ENDPOINT_XFER_INT:
 674		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
 675		p = "INT";
 676		break;
 677	case USB_ENDPOINT_XFER_ISOC:
 678		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
 679		p = "ISO";
 680		break;
 681	}
 682	qh->bandwidth_reserved = 1;
 683	dev_dbg(uhci_dev(uhci),
 684			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
 685			"reserve", qh->udev->devnum,
 686			qh->hep->desc.bEndpointAddress, p,
 687			qh->period, qh->phase, load);
 688}
 689
 690/*
 691 * Release a periodic QH's bandwidth reservation
 692 */
 693static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
 694{
 695	int i;
 696	int load = qh->load;
 697	char *p = "??";
 698
 699	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
 700		uhci->load[i] -= load;
 701		uhci->total_load -= load;
 702	}
 703	uhci_to_hcd(uhci)->self.bandwidth_allocated =
 704			uhci->total_load / MAX_PHASE;
 705	switch (qh->type) {
 706	case USB_ENDPOINT_XFER_INT:
 707		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
 708		p = "INT";
 709		break;
 710	case USB_ENDPOINT_XFER_ISOC:
 711		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
 712		p = "ISO";
 713		break;
 714	}
 715	qh->bandwidth_reserved = 0;
 716	dev_dbg(uhci_dev(uhci),
 717			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
 718			"release", qh->udev->devnum,
 719			qh->hep->desc.bEndpointAddress, p,
 720			qh->period, qh->phase, load);
 721}
 722
 723static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 724		struct urb *urb)
 725{
 726	struct urb_priv *urbp;
 727
 728	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
 729	if (!urbp)
 730		return NULL;
 731
 732	urbp->urb = urb;
 733	urb->hcpriv = urbp;
 734
 735	INIT_LIST_HEAD(&urbp->node);
 736	INIT_LIST_HEAD(&urbp->td_list);
 737
 738	return urbp;
 739}
 740
 741static void uhci_free_urb_priv(struct uhci_hcd *uhci,
 742		struct urb_priv *urbp)
 743{
 744	struct uhci_td *td, *tmp;
 745
 746	if (!list_empty(&urbp->node))
 747		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
 748				urbp->urb);
 749
 750	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
 751		uhci_remove_td_from_urbp(td);
 752		uhci_free_td(uhci, td);
 753	}
 754
 755	kmem_cache_free(uhci_up_cachep, urbp);
 756}
 757
 758/*
 759 * Map status to standard result codes
 760 *
 761 * <status> is (td_status(uhci, td) & 0xF60000), a.k.a.
 762 * uhci_status_bits(td_status(uhci, td)).
 763 * Note: <status> does not include the TD_CTRL_NAK bit.
 764 * <dir_out> is True for output TDs and False for input TDs.
 765 */
 766static int uhci_map_status(int status, int dir_out)
 767{
 768	if (!status)
 769		return 0;
 770	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
 771		return -EPROTO;
 772	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
 773		if (dir_out)
 774			return -EPROTO;
 775		else
 776			return -EILSEQ;
 777	}
 778	if (status & TD_CTRL_BABBLE)			/* Babble */
 779		return -EOVERFLOW;
 780	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
 781		return -ENOSR;
 782	if (status & TD_CTRL_STALLED)			/* Stalled */
 783		return -EPIPE;
 784	return 0;
 785}
 786
 787/*
 788 * Control transfers
 789 */
 790static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
 791		struct uhci_qh *qh)
 792{
 793	struct uhci_td *td;
 794	unsigned long destination, status;
 795	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
 796	int len = urb->transfer_buffer_length;
 797	dma_addr_t data = urb->transfer_dma;
 798	__hc32 *plink;
 799	struct urb_priv *urbp = urb->hcpriv;
 800	int skel;
 801
 802	/* The "pipe" thing contains the destination in bits 8--18 */
 803	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
 804
 805	/* 3 errors, dummy TD remains inactive */
 806	status = uhci_maxerr(3);
 807	if (urb->dev->speed == USB_SPEED_LOW)
 808		status |= TD_CTRL_LS;
 809
 810	/*
 811	 * Build the TD for the control request setup packet
 812	 */
 813	td = qh->dummy_td;
 814	uhci_add_td_to_urbp(td, urbp);
 815	uhci_fill_td(uhci, td, status, destination | uhci_explen(8),
 816			urb->setup_dma);
 817	plink = &td->link;
 818	status |= TD_CTRL_ACTIVE;
 819
 820	/*
 821	 * If direction is "send", change the packet ID from SETUP (0x2D)
 822	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
 823	 * set Short Packet Detect (SPD) for all data packets.
 824	 *
 825	 * 0-length transfers always get treated as "send".
 826	 */
 827	if (usb_pipeout(urb->pipe) || len == 0)
 828		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
 829	else {
 830		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
 831		status |= TD_CTRL_SPD;
 832	}
 833
 834	/*
 835	 * Build the DATA TDs
 836	 */
 837	while (len > 0) {
 838		int pktsze = maxsze;
 839
 840		if (len <= pktsze) {		/* The last data packet */
 841			pktsze = len;
 842			status &= ~TD_CTRL_SPD;
 843		}
 844
 845		td = uhci_alloc_td(uhci);
 846		if (!td)
 847			goto nomem;
 848		*plink = LINK_TO_TD(uhci, td);
 849
 850		/* Alternate Data0/1 (start with Data1) */
 851		destination ^= TD_TOKEN_TOGGLE;
 852
 853		uhci_add_td_to_urbp(td, urbp);
 854		uhci_fill_td(uhci, td, status,
 855			destination | uhci_explen(pktsze), data);
 856		plink = &td->link;
 857
 858		data += pktsze;
 859		len -= pktsze;
 860	}
 861
 862	/*
 863	 * Build the final TD for control status
 864	 */
 865	td = uhci_alloc_td(uhci);
 866	if (!td)
 867		goto nomem;
 868	*plink = LINK_TO_TD(uhci, td);
 869
 870	/* Change direction for the status transaction */
 871	destination ^= (USB_PID_IN ^ USB_PID_OUT);
 872	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */
 873
 874	uhci_add_td_to_urbp(td, urbp);
 875	uhci_fill_td(uhci, td, status | TD_CTRL_IOC,
 876			destination | uhci_explen(0), 0);
 877	plink = &td->link;
 878
 879	/*
 880	 * Build the new dummy TD and activate the old one
 881	 */
 882	td = uhci_alloc_td(uhci);
 883	if (!td)
 884		goto nomem;
 885	*plink = LINK_TO_TD(uhci, td);
 886
 887	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
 888	wmb();
 889	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
 890	qh->dummy_td = td;
 891
 892	/* Low-speed transfers get a different queue, and won't hog the bus.
 893	 * Also, some devices enumerate better without FSBR; the easiest way
 894	 * to do that is to put URBs on the low-speed queue while the device
 895	 * isn't in the CONFIGURED state. */
 896	if (urb->dev->speed == USB_SPEED_LOW ||
 897			urb->dev->state != USB_STATE_CONFIGURED)
 898		skel = SKEL_LS_CONTROL;
 899	else {
 900		skel = SKEL_FS_CONTROL;
 901		uhci_add_fsbr(uhci, urb);
 902	}
 903	if (qh->state != QH_STATE_ACTIVE)
 904		qh->skel = skel;
 905	return 0;
 906
 907nomem:
 908	/* Remove the dummy TD from the td_list so it doesn't get freed */
 909	uhci_remove_td_from_urbp(qh->dummy_td);
 910	return -ENOMEM;
 911}
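/*
 * The dummy-TD handling above is what allows a new URB to be added to a
 * queue the controller may already be executing: the queue always ends in
 * an inactive dummy TD, the new URB's TDs are built starting at that
 * dummy, and only after a fresh dummy has been linked on (and the wmb()
 * has ordered the writes) is the old dummy marked ACTIVE, so the hardware
 * sees the new transfer either completely or not at all.
 */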
 912
 913/*
 914 * Common submit for bulk and interrupt
 915 */
 916static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 917		struct uhci_qh *qh)
 918{
 919	struct uhci_td *td;
 920	unsigned long destination, status;
 921	int maxsze = usb_endpoint_maxp(&qh->hep->desc);
 922	int len = urb->transfer_buffer_length;
 923	int this_sg_len;
 924	dma_addr_t data;
 925	__hc32 *plink;
 926	struct urb_priv *urbp = urb->hcpriv;
 927	unsigned int toggle;
 928	struct scatterlist  *sg;
 929	int i;
 930
 931	if (len < 0)
 932		return -EINVAL;
 933
 934	/* The "pipe" thing contains the destination in bits 8--18 */
 935	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
 936	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
 937			 usb_pipeout(urb->pipe));
 938
 939	/* 3 errors, dummy TD remains inactive */
 940	status = uhci_maxerr(3);
 941	if (urb->dev->speed == USB_SPEED_LOW)
 942		status |= TD_CTRL_LS;
 943	if (usb_pipein(urb->pipe))
 944		status |= TD_CTRL_SPD;
 945
 946	i = urb->num_mapped_sgs;
 947	if (len > 0 && i > 0) {
 948		sg = urb->sg;
 949		data = sg_dma_address(sg);
 950
 951		/* urb->transfer_buffer_length may be smaller than the
 952		 * size of the scatterlist (or vice versa)
 953		 */
 954		this_sg_len = min_t(int, sg_dma_len(sg), len);
 955	} else {
 956		sg = NULL;
 957		data = urb->transfer_dma;
 958		this_sg_len = len;
 959	}
 960	/*
 961	 * Build the DATA TDs
 962	 */
 963	plink = NULL;
 964	td = qh->dummy_td;
 965	for (;;) {	/* Allow zero length packets */
 966		int pktsze = maxsze;
 967
 968		if (len <= pktsze) {		/* The last packet */
 969			pktsze = len;
 970			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
 971				status &= ~TD_CTRL_SPD;
 972		}
 973
 974		if (plink) {
 975			td = uhci_alloc_td(uhci);
 976			if (!td)
 977				goto nomem;
 978			*plink = LINK_TO_TD(uhci, td);
 979		}
 980		uhci_add_td_to_urbp(td, urbp);
 981		uhci_fill_td(uhci, td, status,
 982				destination | uhci_explen(pktsze) |
 983					(toggle << TD_TOKEN_TOGGLE_SHIFT),
 984				data);
 985		plink = &td->link;
 986		status |= TD_CTRL_ACTIVE;
 987
 988		toggle ^= 1;
 989		data += pktsze;
 990		this_sg_len -= pktsze;
 991		len -= maxsze;
 992		if (this_sg_len <= 0) {
 993			if (--i <= 0 || len <= 0)
 994				break;
 995			sg = sg_next(sg);
 996			data = sg_dma_address(sg);
 997			this_sg_len = min_t(int, sg_dma_len(sg), len);
 998		}
 999	}
1000
1001	/*
1002	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
1003	 * is OUT and the transfer_length was an exact multiple of maxsze,
1004	 * hence (len = transfer_length - N * maxsze) == 0
1005	 * however, if transfer_length == 0, the zero packet was already
1006	 * prepared above.
1007	 */
1008	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
1009			usb_pipeout(urb->pipe) && len == 0 &&
1010			urb->transfer_buffer_length > 0) {
1011		td = uhci_alloc_td(uhci);
1012		if (!td)
1013			goto nomem;
1014		*plink = LINK_TO_TD(uhci, td);
1015
1016		uhci_add_td_to_urbp(td, urbp);
1017		uhci_fill_td(uhci, td, status,
1018				destination | uhci_explen(0) |
1019					(toggle << TD_TOKEN_TOGGLE_SHIFT),
1020				data);
1021		plink = &td->link;
1022
1023		toggle ^= 1;
1024	}
1025
1026	/* Set the interrupt-on-completion flag on the last packet.
1027	 * A more-or-less typical 4 KB URB (= size of one memory page)
1028	 * will require about 3 ms to transfer; that's a little on the
1029	 * fast side but not enough to justify delaying an interrupt
1030	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1031	 * flag setting. */
1032	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
1033
1034	/*
1035	 * Build the new dummy TD and activate the old one
1036	 */
1037	td = uhci_alloc_td(uhci);
1038	if (!td)
1039		goto nomem;
1040	*plink = LINK_TO_TD(uhci, td);
1041
1042	uhci_fill_td(uhci, td, 0, USB_PID_OUT | uhci_explen(0), 0);
1043	wmb();
1044	qh->dummy_td->status |= cpu_to_hc32(uhci, TD_CTRL_ACTIVE);
1045	qh->dummy_td = td;
1046
1047	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1048			usb_pipeout(urb->pipe), toggle);
1049	return 0;
1050
1051nomem:
1052	/* Remove the dummy TD from the td_list so it doesn't get freed */
1053	uhci_remove_td_from_urbp(qh->dummy_td);
1054	return -ENOMEM;
1055}
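/*
 * Note how Short Packet Detect is used above for IN transfers: every data
 * TD except possibly the last one has SPD set, so a short packet stops
 * the queue and uhci_fixup_short_transfer() can trim the TDs that were
 * skipped; the final TD keeps SPD only when URB_SHORT_NOT_OK asks for a
 * short transfer to be reported as an error.
 */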
1056
1057static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
1058		struct uhci_qh *qh)
1059{
1060	int ret;
1061
1062	/* Can't have low-speed bulk transfers */
1063	if (urb->dev->speed == USB_SPEED_LOW)
1064		return -EINVAL;
1065
1066	if (qh->state != QH_STATE_ACTIVE)
1067		qh->skel = SKEL_BULK;
1068	ret = uhci_submit_common(uhci, urb, qh);
1069	if (ret == 0)
1070		uhci_add_fsbr(uhci, urb);
1071	return ret;
1072}
1073
1074static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
1075		struct uhci_qh *qh)
1076{
1077	int ret;
1078
1079	/* USB 1.1 interrupt transfers only involve one packet per interval.
1080	 * Drivers can submit URBs of any length, but longer ones will need
1081	 * multiple intervals to complete.
1082	 */
1083
1084	if (!qh->bandwidth_reserved) {
1085		int exponent;
1086
1087		/* Figure out which power-of-two queue to use */
1088		for (exponent = 7; exponent >= 0; --exponent) {
1089			if ((1 << exponent) <= urb->interval)
1090				break;
1091		}
1092		if (exponent < 0)
1093			return -EINVAL;
1094
1095		/* If the slot is full, try a lower period */
1096		do {
1097			qh->period = 1 << exponent;
1098			qh->skel = SKEL_INDEX(exponent);
1099
1100			/* For now, interrupt phase is fixed by the layout
1101			 * of the QH lists.
1102			 */
1103			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
1104			ret = uhci_check_bandwidth(uhci, qh);
1105		} while (ret != 0 && --exponent >= 0);
1106		if (ret)
1107			return ret;
1108	} else if (qh->period > urb->interval)
1109		return -EINVAL;		/* Can't decrease the period */
1110
1111	ret = uhci_submit_common(uhci, urb, qh);
1112	if (ret == 0) {
1113		urb->interval = qh->period;
1114		if (!qh->bandwidth_reserved)
1115			uhci_reserve_bandwidth(uhci, qh);
1116	}
1117	return ret;
1118}
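/*
 * Worked example for the interval handling above: an interrupt URB asking
 * for an interval of 10 frames gets exponent 3 (the largest power of two
 * not exceeding 10), i.e. a period of 8 frames with a phase of period/2.
 * If that slot fails the bandwidth check, the loop retries with periods
 * 4, 2 and 1 before giving up, and on success urb->interval is rounded
 * down to the power-of-two period actually granted.
 */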
1119
1120/*
1121 * Fix up the data structures following a short transfer
1122 */
1123static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
1124		struct uhci_qh *qh, struct urb_priv *urbp)
1125{
1126	struct uhci_td *td;
1127	struct list_head *tmp;
1128	int ret;
1129
1130	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
1131	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1132
1133		/* When a control transfer is short, we have to restart
1134		 * the queue at the status stage transaction, which is
1135		 * the last TD. */
1136		WARN_ON(list_empty(&urbp->td_list));
1137		qh->element = LINK_TO_TD(uhci, td);
1138		tmp = td->list.prev;
1139		ret = -EINPROGRESS;
1140
1141	} else {
1142
1143		/* When a bulk/interrupt transfer is short, we have to
1144		 * fix up the toggles of the following URBs on the queue
1145		 * before restarting the queue at the next URB. */
1146		qh->initial_toggle =
1147			uhci_toggle(td_token(uhci, qh->post_td)) ^ 1;
1148		uhci_fixup_toggles(uhci, qh, 1);
1149
1150		if (list_empty(&urbp->td_list))
1151			td = qh->post_td;
1152		qh->element = td->link;
1153		tmp = urbp->td_list.prev;
1154		ret = 0;
1155	}
1156
1157	/* Remove all the TDs we skipped over, from tmp back to the start */
1158	while (tmp != &urbp->td_list) {
1159		td = list_entry(tmp, struct uhci_td, list);
1160		tmp = tmp->prev;
1161
1162		uhci_remove_td_from_urbp(td);
1163		uhci_free_td(uhci, td);
1164	}
1165	return ret;
1166}
1167
1168/*
1169 * Common result for control, bulk, and interrupt
1170 */
1171static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1172{
1173	struct urb_priv *urbp = urb->hcpriv;
1174	struct uhci_qh *qh = urbp->qh;
1175	struct uhci_td *td, *tmp;
1176	unsigned status;
1177	int ret = 0;
1178
1179	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
1180		unsigned int ctrlstat;
1181		int len;
1182
1183		ctrlstat = td_status(uhci, td);
1184		status = uhci_status_bits(ctrlstat);
1185		if (status & TD_CTRL_ACTIVE)
1186			return -EINPROGRESS;
1187
1188		len = uhci_actual_length(ctrlstat);
1189		urb->actual_length += len;
1190
1191		if (status) {
1192			ret = uhci_map_status(status,
1193					uhci_packetout(td_token(uhci, td)));
1194			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1195				/* Some debugging code */
1196				dev_dbg(&urb->dev->dev,
1197						"%s: failed with status %x\n",
1198						__func__, status);
1199
1200				if (debug > 1 && errbuf) {
1201					/* Print the chain for debugging */
1202					uhci_show_qh(uhci, urbp->qh, errbuf,
1203						ERRBUF_LEN - EXTRA_SPACE, 0);
1204					lprintk(errbuf);
1205				}
1206			}
1207
1208		/* Did we receive a short packet? */
1209		} else if (len < uhci_expected_length(td_token(uhci, td))) {
1210
1211			/* For control transfers, go to the status TD if
1212			 * this isn't already the last data TD */
1213			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1214				if (td->list.next != urbp->td_list.prev)
1215					ret = 1;
1216			}
1217
1218			/* For bulk and interrupt, this may be an error */
1219			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
1220				ret = -EREMOTEIO;
1221
1222			/* Fixup needed only if this isn't the URB's last TD */
1223			else if (&td->list != urbp->td_list.prev)
1224				ret = 1;
1225		}
1226
1227		uhci_remove_td_from_urbp(td);
1228		if (qh->post_td)
1229			uhci_free_td(uhci, qh->post_td);
1230		qh->post_td = td;
1231
1232		if (ret != 0)
1233			goto err;
1234	}
1235	return ret;
1236
1237err:
1238	if (ret < 0) {
1239		/* Note that the queue has stopped and save
1240		 * the next toggle value */
1241		qh->element = UHCI_PTR_TERM(uhci);
1242		qh->is_stopped = 1;
1243		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
1244		qh->initial_toggle = uhci_toggle(td_token(uhci, td)) ^
1245				(ret == -EREMOTEIO);
1246
1247	} else		/* Short packet received */
1248		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
1249	return ret;
1250}
1251
1252/*
1253 * Isochronous transfers
1254 */
1255static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1256		struct uhci_qh *qh)
1257{
1258	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
1259	int i;
1260	unsigned frame, next;
1261	unsigned long destination, status;
1262	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
1263
1264	/* Values must not be too big (could overflow below) */
1265	if (urb->interval >= UHCI_NUMFRAMES ||
1266			urb->number_of_packets >= UHCI_NUMFRAMES)
1267		return -EFBIG;
1268
1269	uhci_get_current_frame_number(uhci);
1270
1271	/* Check the period and figure out the starting frame number */
1272	if (!qh->bandwidth_reserved) {
1273		qh->period = urb->interval;
1274		qh->phase = -1;		/* Find the best phase */
1275		i = uhci_check_bandwidth(uhci, qh);
1276		if (i)
1277			return i;
1278
1279		/* Allow a little time to allocate the TDs */
1280		next = uhci->frame_number + 10;
1281		frame = qh->phase;
1282
1283		/* Round up to the first available slot */
1284		frame += (next - frame + qh->period - 1) & -qh->period;
1285
1286	} else if (qh->period != urb->interval) {
1287		return -EINVAL;		/* Can't change the period */
1288
1289	} else {
1290		next = uhci->frame_number + 1;
1291
1292		/* Find the next unused frame */
1293		if (list_empty(&qh->queue)) {
1294			frame = qh->iso_frame;
1295		} else {
1296			struct urb *lurb;
1297
1298			lurb = list_entry(qh->queue.prev,
1299					struct urb_priv, node)->urb;
1300			frame = lurb->start_frame +
1301					lurb->number_of_packets *
1302					lurb->interval;
1303		}
1304
1305		/* Fell behind? */
1306		if (!uhci_frame_before_eq(next, frame)) {
1307
1308			/* USB_ISO_ASAP: Round up to the first available slot */
1309			if (urb->transfer_flags & URB_ISO_ASAP)
1310				frame += (next - frame + qh->period - 1) &
1311						-qh->period;
1312
1313			/*
1314			 * Not ASAP: Use the next slot in the stream,
1315			 * no matter what.
1316			 */
1317			else if (!uhci_frame_before_eq(next,
1318					frame + (urb->number_of_packets - 1) *
1319						qh->period))
1320				dev_dbg(uhci_dev(uhci), "iso underrun %p (%u+%u < %u)\n",
1321						urb, frame,
1322						(urb->number_of_packets - 1) *
1323							qh->period,
1324						next);
1325		}
1326	}
1327
1328	/* Make sure we won't have to go too far into the future */
1329	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
1330			frame + urb->number_of_packets * urb->interval))
1331		return -EFBIG;
1332	urb->start_frame = frame;
1333
1334	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1335	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1336
1337	for (i = 0; i < urb->number_of_packets; i++) {
1338		td = uhci_alloc_td(uhci);
1339		if (!td)
1340			return -ENOMEM;
1341
1342		uhci_add_td_to_urbp(td, urbp);
1343		uhci_fill_td(uhci, td, status, destination |
1344				uhci_explen(urb->iso_frame_desc[i].length),
1345				urb->transfer_dma +
1346					urb->iso_frame_desc[i].offset);
1347	}
1348
1349	/* Set the interrupt-on-completion flag on the last packet. */
1350	td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
1351
1352	/* Add the TDs to the frame list */
1353	frame = urb->start_frame;
1354	list_for_each_entry(td, &urbp->td_list, list) {
1355		uhci_insert_td_in_frame_list(uhci, td, frame);
1356		frame += qh->period;
1357	}
1358
1359	if (list_empty(&qh->queue)) {
1360		qh->iso_packet_desc = &urb->iso_frame_desc[0];
1361		qh->iso_frame = urb->start_frame;
1362	}
1363
1364	qh->skel = SKEL_ISO;
1365	if (!qh->bandwidth_reserved)
1366		uhci_reserve_bandwidth(uhci, qh);
1367	return 0;
1368}
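/*
 * For illustration of the start-frame rounding above: with a period of 8
 * and "next" landing 5 frames past the phase frame, (5 + 8 - 1) & -8 = 8,
 * so the first packet is scheduled 8 frames after the phase frame, the
 * first period boundary at or beyond "next".
 */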
1369
1370static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1371{
1372	struct uhci_td *td, *tmp;
1373	struct urb_priv *urbp = urb->hcpriv;
1374	struct uhci_qh *qh = urbp->qh;
1375
1376	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
1377		unsigned int ctrlstat;
1378		int status;
1379		int actlength;
1380
1381		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
1382			return -EINPROGRESS;
1383
1384		uhci_remove_tds_from_frame(uhci, qh->iso_frame);
1385
1386		ctrlstat = td_status(uhci, td);
1387		if (ctrlstat & TD_CTRL_ACTIVE) {
1388			status = -EXDEV;	/* TD was added too late? */
1389		} else {
1390			status = uhci_map_status(uhci_status_bits(ctrlstat),
1391					usb_pipeout(urb->pipe));
1392			actlength = uhci_actual_length(ctrlstat);
1393
1394			urb->actual_length += actlength;
1395			qh->iso_packet_desc->actual_length = actlength;
1396			qh->iso_packet_desc->status = status;
1397		}
1398		if (status)
1399			urb->error_count++;
1400
1401		uhci_remove_td_from_urbp(td);
1402		uhci_free_td(uhci, td);
1403		qh->iso_frame += qh->period;
1404		++qh->iso_packet_desc;
1405	}
1406	return 0;
1407}
1408
1409static int uhci_urb_enqueue(struct usb_hcd *hcd,
1410		struct urb *urb, gfp_t mem_flags)
1411{
1412	int ret;
1413	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1414	unsigned long flags;
1415	struct urb_priv *urbp;
1416	struct uhci_qh *qh;
1417
1418	spin_lock_irqsave(&uhci->lock, flags);
1419
1420	ret = usb_hcd_link_urb_to_ep(hcd, urb);
1421	if (ret)
1422		goto done_not_linked;
1423
1424	ret = -ENOMEM;
1425	urbp = uhci_alloc_urb_priv(uhci, urb);
1426	if (!urbp)
1427		goto done;
1428
1429	if (urb->ep->hcpriv)
1430		qh = urb->ep->hcpriv;
1431	else {
1432		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
1433		if (!qh)
1434			goto err_no_qh;
1435	}
1436	urbp->qh = qh;
1437
1438	switch (qh->type) {
1439	case USB_ENDPOINT_XFER_CONTROL:
1440		ret = uhci_submit_control(uhci, urb, qh);
1441		break;
1442	case USB_ENDPOINT_XFER_BULK:
1443		ret = uhci_submit_bulk(uhci, urb, qh);
1444		break;
1445	case USB_ENDPOINT_XFER_INT:
1446		ret = uhci_submit_interrupt(uhci, urb, qh);
1447		break;
1448	case USB_ENDPOINT_XFER_ISOC:
1449		urb->error_count = 0;
1450		ret = uhci_submit_isochronous(uhci, urb, qh);
1451		break;
1452	}
1453	if (ret != 0)
1454		goto err_submit_failed;
1455
1456	/* Add this URB to the QH */
1457	list_add_tail(&urbp->node, &qh->queue);
1458
1459	/* If the new URB is the first and only one on this QH then either
1460	 * the QH is new and idle or else it's unlinked and waiting to
1461	 * become idle, so we can activate it right away.  But only if the
1462	 * queue isn't stopped. */
1463	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
1464		uhci_activate_qh(uhci, qh);
1465		uhci_urbp_wants_fsbr(uhci, urbp);
1466	}
1467	goto done;
1468
1469err_submit_failed:
1470	if (qh->state == QH_STATE_IDLE)
1471		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
1472err_no_qh:
1473	uhci_free_urb_priv(uhci, urbp);
1474done:
1475	if (ret)
1476		usb_hcd_unlink_urb_from_ep(hcd, urb);
1477done_not_linked:
1478	spin_unlock_irqrestore(&uhci->lock, flags);
1479	return ret;
1480}
1481
1482static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1483{
1484	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1485	unsigned long flags;
1486	struct uhci_qh *qh;
1487	int rc;
1488
1489	spin_lock_irqsave(&uhci->lock, flags);
1490	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1491	if (rc)
1492		goto done;
1493
1494	qh = ((struct urb_priv *) urb->hcpriv)->qh;
1495
1496	/* Remove Isochronous TDs from the frame list ASAP */
1497	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
1498		uhci_unlink_isochronous_tds(uhci, urb);
1499		mb();
1500
1501		/* If the URB has already started, update the QH unlink time */
1502		uhci_get_current_frame_number(uhci);
1503		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
1504			qh->unlink_frame = uhci->frame_number;
1505	}
1506
1507	uhci_unlink_qh(uhci, qh);
1508
1509done:
1510	spin_unlock_irqrestore(&uhci->lock, flags);
1511	return rc;
1512}
1513
1514/*
1515 * Finish unlinking an URB and give it back
1516 */
1517static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
1518		struct urb *urb, int status)
1519__releases(uhci->lock)
1520__acquires(uhci->lock)
1521{
1522	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
1523
1524	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1525
1526		/* Subtract off the length of the SETUP packet from
1527		 * urb->actual_length.
1528		 */
1529		urb->actual_length -= min_t(u32, 8, urb->actual_length);
1530	}
1531
1532	/* When giving back the first URB in an Isochronous queue,
1533	 * reinitialize the QH's iso-related members for the next URB. */
1534	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
1535			urbp->node.prev == &qh->queue &&
1536			urbp->node.next != &qh->queue) {
1537		struct urb *nurb = list_entry(urbp->node.next,
1538				struct urb_priv, node)->urb;
1539
1540		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
1541		qh->iso_frame = nurb->start_frame;
1542	}
1543
1544	/* Take the URB off the QH's queue.  If the queue is now empty,
1545	 * this is a perfect time for a toggle fixup. */
1546	list_del_init(&urbp->node);
1547	if (list_empty(&qh->queue) && qh->needs_fixup) {
1548		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1549				usb_pipeout(urb->pipe), qh->initial_toggle);
1550		qh->needs_fixup = 0;
1551	}
1552
1553	uhci_free_urb_priv(uhci, urbp);
1554	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
1555
1556	spin_unlock(&uhci->lock);
1557	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
1558	spin_lock(&uhci->lock);
1559
1560	/* If the queue is now empty, we can unlink the QH and give up its
1561	 * reserved bandwidth. */
1562	if (list_empty(&qh->queue)) {
1563		uhci_unlink_qh(uhci, qh);
1564		if (qh->bandwidth_reserved)
1565			uhci_release_bandwidth(uhci, qh);
1566	}
1567}
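/*
 * uhci_giveback_urb() deliberately drops uhci->lock around the call to
 * usb_hcd_giveback_urb() (hence the __releases/__acquires annotations):
 * the URB's completion handler may call back into the driver, for
 * instance to resubmit the URB, and must not run with the host
 * controller's spinlock held.
 */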
1568
1569/*
1570 * Scan the URBs in a QH's queue
1571 */
1572#define QH_FINISHED_UNLINKING(qh)			\
1573		(qh->state == QH_STATE_UNLINKING &&	\
1574		uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
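/*
 * A QH only counts as fully unlinked once the frame counter has moved on
 * from the frame in which it was taken off the schedule, so the hardware
 * can no longer hold a pointer into it.  The is_stopped term is there so
 * that a halted controller, which cannot be executing the QH at all, does
 * not leave the unlink waiting for a frame boundary that will never come.
 */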
1575
1576static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
1577{
1578	struct urb_priv *urbp;
1579	struct urb *urb;
1580	int status;
1581
1582	while (!list_empty(&qh->queue)) {
1583		urbp = list_entry(qh->queue.next, struct urb_priv, node);
1584		urb = urbp->urb;
1585
1586		if (qh->type == USB_ENDPOINT_XFER_ISOC)
1587			status = uhci_result_isochronous(uhci, urb);
1588		else
1589			status = uhci_result_common(uhci, urb);
1590		if (status == -EINPROGRESS)
1591			break;
1592
1593		/* Dequeued but completed URBs can't be given back unless
1594		 * the QH is stopped or has finished unlinking. */
1595		if (urb->unlinked) {
1596			if (QH_FINISHED_UNLINKING(qh))
1597				qh->is_stopped = 1;
1598			else if (!qh->is_stopped)
1599				return;
1600		}
1601
1602		uhci_giveback_urb(uhci, qh, urb, status);
1603		if (status < 0)
1604			break;
1605	}
1606
1607	/* If the QH is neither stopped nor finished unlinking (normal case),
1608	 * our work here is done. */
1609	if (QH_FINISHED_UNLINKING(qh))
1610		qh->is_stopped = 1;
1611	else if (!qh->is_stopped)
1612		return;
1613
1614	/* Otherwise give back each of the dequeued URBs */
1615restart:
1616	list_for_each_entry(urbp, &qh->queue, node) {
1617		urb = urbp->urb;
1618		if (urb->unlinked) {
1619
1620			/* Fix up the TD links and save the toggles for
1621			 * non-Isochronous queues.  For Isochronous queues,
1622			 * test for too-recent dequeues. */
1623			if (!uhci_cleanup_queue(uhci, qh, urb)) {
1624				qh->is_stopped = 0;
1625				return;
1626			}
1627			uhci_giveback_urb(uhci, qh, urb, 0);
1628			goto restart;
1629		}
1630	}
1631	qh->is_stopped = 0;
1632
1633	/* There are no more dequeued URBs.  If there are still URBs on the
1634	 * queue, the QH can now be re-activated. */
1635	if (!list_empty(&qh->queue)) {
1636		if (qh->needs_fixup)
1637			uhci_fixup_toggles(uhci, qh, 0);
1638
1639		/* If the first URB on the queue wants FSBR but its time
1640		 * limit has expired, set the next TD to interrupt on
1641		 * completion before reactivating the QH. */
1642		urbp = list_entry(qh->queue.next, struct urb_priv, node);
1643		if (urbp->fsbr && qh->wait_expired) {
1644			struct uhci_td *td = list_entry(urbp->td_list.next,
1645					struct uhci_td, list);
1646
1647			td->status |= cpu_to_hc32(uhci, TD_CTRL_IOC);
1648		}
1649
1650		uhci_activate_qh(uhci, qh);
1651	}
1652
1653	/* The queue is empty.  The QH can become idle if it is fully
1654	 * unlinked. */
1655	else if (QH_FINISHED_UNLINKING(qh))
1656		uhci_make_qh_idle(uhci, qh);
1657}
1658
1659/*
1660 * Check for queues that have made some forward progress.
1661 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
1662 * has not advanced since last examined; 1 otherwise.
1663 *
1664 * Early Intel controllers have a bug which causes qh->element sometimes
1665 * not to advance when a TD completes successfully.  The queue remains
1666 * stuck on the inactive completed TD.  We detect such cases and advance
1667 * the element pointer by hand.
1668 */
1669static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
1670{
1671	struct urb_priv *urbp = NULL;
1672	struct uhci_td *td;
1673	int ret = 1;
1674	unsigned status;
1675
1676	if (qh->type == USB_ENDPOINT_XFER_ISOC)
1677		goto done;
1678
1679	/* Treat an UNLINKING queue as though it hasn't advanced.
1680	 * This is okay because reactivation will treat it as though
1681	 * it has advanced, and if it is going to become IDLE then
1682	 * this doesn't matter anyway.  Furthermore it's possible
1683	 * for an UNLINKING queue not to have any URBs at all, or
1684	 * for its first URB not to have any TDs (if it was dequeued
1685	 * just as it completed).  So it's not easy in any case to
1686	 * test whether such queues have advanced. */
1687	if (qh->state != QH_STATE_ACTIVE) {
1688		urbp = NULL;
1689		status = 0;
1690
1691	} else {
1692		urbp = list_entry(qh->queue.next, struct urb_priv, node);
1693		td = list_entry(urbp->td_list.next, struct uhci_td, list);
1694		status = td_status(uhci, td);
1695		if (!(status & TD_CTRL_ACTIVE)) {
1696
1697			/* We're okay, the queue has advanced */
1698			qh->wait_expired = 0;
1699			qh->advance_jiffies = jiffies;
1700			goto done;
1701		}
1702		ret = uhci->is_stopped;
1703	}
1704
1705	/* The queue hasn't advanced; check for timeout */
1706	if (qh->wait_expired)
1707		goto done;
1708
1709	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
1710
1711		/* Detect the Intel bug and work around it */
1712		if (qh->post_td && qh_element(qh) ==
1713			LINK_TO_TD(uhci, qh->post_td)) {
1714			qh->element = qh->post_td->link;
1715			qh->advance_jiffies = jiffies;
1716			ret = 1;
1717			goto done;
1718		}
1719
1720		qh->wait_expired = 1;
1721
1722		/* If the current URB wants FSBR, unlink it temporarily
1723		 * so that we can safely set the next TD to interrupt on
1724		 * completion.  That way we'll know as soon as the queue
1725		 * starts moving again. */
1726		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
1727			uhci_unlink_qh(uhci, qh);
1728
1729	} else {
1730		/* Unmoving but not-yet-expired queues keep FSBR alive */
1731		if (urbp)
1732			uhci_urbp_wants_fsbr(uhci, urbp);
1733	}
1734
1735done:
1736	return ret;
1737}
1738
1739/*
1740 * Process events in the schedule, but only in one thread at a time
1741 */
1742static void uhci_scan_schedule(struct uhci_hcd *uhci)
1743{
1744	int i;
1745	struct uhci_qh *qh;
1746
1747	/* Don't allow re-entrant calls */
1748	if (uhci->scan_in_progress) {
1749		uhci->need_rescan = 1;
1750		return;
1751	}
1752	uhci->scan_in_progress = 1;
1753rescan:
1754	uhci->need_rescan = 0;
1755	uhci->fsbr_is_wanted = 0;
1756
1757	uhci_clear_next_interrupt(uhci);
1758	uhci_get_current_frame_number(uhci);
1759	uhci->cur_iso_frame = uhci->frame_number;
1760
1761	/* Go through all the QH queues and process the URBs in each one */
1762	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
1763		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
1764				struct uhci_qh, node);
1765		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
1766			uhci->next_qh = list_entry(qh->node.next,
1767					struct uhci_qh, node);
1768
1769			if (uhci_advance_check(uhci, qh)) {
1770				uhci_scan_qh(uhci, qh);
1771				if (qh->state == QH_STATE_ACTIVE) {
1772					uhci_urbp_wants_fsbr(uhci,
1773	list_entry(qh->queue.next, struct urb_priv, node));
1774				}
1775			}
1776		}
1777	}
1778
1779	uhci->last_iso_frame = uhci->cur_iso_frame;
1780	if (uhci->need_rescan)
1781		goto rescan;
1782	uhci->scan_in_progress = 0;
1783
1784	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
1785			!uhci->fsbr_expiring) {
1786		uhci->fsbr_expiring = 1;
1787		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
1788	}
1789
1790	if (list_empty(&uhci->skel_unlink_qh->node))
1791		uhci_clear_next_interrupt(uhci);
1792	else
1793		uhci_set_next_interrupt(uhci);
1794}
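/*
 * The scan_in_progress/need_rescan pair above is a simple re-entrancy
 * guard: a scan requested while one is already running merely sets
 * need_rescan, and the running scan loops back ("goto rescan") until no
 * further request is pending.  Events are therefore never lost, but only
 * one thread ever walks the schedule at a time.
 */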