// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame(struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	/* case Q_TYPE_SITD: */
	default:
		return &periodic->sitd->sitd_next;
	}
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}
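
/*
 * Illustrative sketch (not driver code): the shadow list and the
 * hardware list are always walked in tandem, with the type tag taken
 * from the hardware link before either pointer advances:
 *
 *	union ehci_shadow *prev = &ehci->pshadow[frame];
 *	__hc32 *hw = &ehci->periodic[frame];
 *	union ehci_shadow here = *prev;
 *
 *	while (here.ptr) {
 *		__hc32 type = Q_NEXT_TYPE(ehci, *hw);
 *
 *		prev = periodic_next_shadow(ehci, prev, type);
 *		hw = shadow_next_periodic(ehci, &here, type);
 *		here = *prev;
 *	}
 *
 * periodic_unlink() below is this loop plus a predecessor test.
 */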

/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));

	if (!ehci->use_dummy_qh ||
	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
			!= EHCI_LIST_END(ehci))
		*hw_p = *shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
	else
		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
}

/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	unsigned		port;
	bool			allocated_index = false;

	if (!utt)
		return NULL;		/* Not below a TT */

	/*
	 * Find/create our data structure.
	 * For hubs with a single TT, we get it directly.
	 * For hubs with multiple TTs, there's an extra level of pointers.
	 */
	tt_index = NULL;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		if (!tt_index) {		/* Create the index array */
			tt_index = kzalloc(utt->hub->maxchild *
					sizeof(*tt_index), GFP_ATOMIC);
			if (!tt_index)
				return ERR_PTR(-ENOMEM);
			utt->hcpriv = tt_index;
			allocated_index = true;
		}
		port = udev->ttport - 1;
		ptt = &tt_index[port];
	} else {
		port = 0;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt) {				/* Create the ehci_tt */
		struct ehci_hcd		*ehci =
				hcd_to_ehci(bus_to_hcd(udev->bus));

		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
		if (!tt) {
			if (allocated_index) {
				utt->hcpriv = NULL;
				kfree(tt_index);
			}
			return ERR_PTR(-ENOMEM);
		}
		list_add_tail(&tt->tt_list, &ehci->tt_list);
		INIT_LIST_HEAD(&tt->ps_list);
		tt->usb_tt = utt;
		tt->tt_port = port;
		*ptt = tt;
	}

	return tt;
}
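
/*
 * Typical call pattern, as in qh_schedule() below (ehci->lock held,
 * hence the GFP_ATOMIC allocations above):
 *
 *	struct ehci_tt *tt = find_tt(qh->ps.udev);
 *
 *	if (IS_ERR(tt))
 *		return PTR_ERR(tt);
 *	compute_tt_budget(ehci->tt_budget, tt);
 */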

/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{
	struct usb_tt		*utt = udev->tt;
	struct ehci_tt		*tt, **tt_index, **ptt;
	int			cnt, i;

	if (!utt || !utt->hcpriv)
		return;		/* Not below a TT, or never allocated */

	cnt = 0;
	if (utt->multi) {
		tt_index = utt->hcpriv;
		ptt = &tt_index[udev->ttport - 1];

		/* How many entries are left in tt_index? */
		for (i = 0; i < utt->hub->maxchild; ++i)
			cnt += !!tt_index[i];
	} else {
		tt_index = NULL;
		ptt = (struct ehci_tt **) &utt->hcpriv;
	}

	tt = *ptt;
	if (!tt || !list_empty(&tt->ps_list))
		return;		/* never allocated, or still in use */

	list_del(&tt->tt_list);
	*ptt = NULL;
	kfree(tt);
	if (cnt == 1) {
		utt->hcpriv = NULL;
		kfree(tt_index);
	}
}

static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{
	dev_dbg(&ps->udev->dev,
			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
			ps->ep->desc.bEndpointAddress,
			(sign >= 0 ? "reserve" : "release"), type,
			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
			ps->phase, ps->phase_uf, ps->period,
			ps->usecs, ps->c_usecs, ps->cs_mask);
}

static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{
	unsigned		start_uf;
	unsigned		i, j, m;
	int			usecs = qh->ps.usecs;
	int			c_usecs = qh->ps.c_usecs;
	int			tt_usecs = qh->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	start_uf = qh->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "intr", &qh->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	/* Entire transaction (high speed) or start-split (full/low speed) */
	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
			i += qh->ps.bw_uperiod)
		ehci->bandwidth[i] += usecs;

	/* Complete-split (full/low speed) */
	if (qh->ps.c_usecs) {
		/* NOTE: adjustments needed for FSTN */
		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
				i += qh->ps.bw_uperiod) {
			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
				if (qh->ps.cs_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}
	}

	/* FS/LS bus bandwidth */
	if (tt_usecs) {
		tt = find_tt(qh->ps.udev);
		if (sign > 0)
			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
		else
			list_del(&qh->ps.ps_list);

		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += qh->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}
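
/*
 * Worked example with illustrative numbers: a high-speed interrupt
 * endpoint with usecs = 30, bw_uperiod = 16 (2 ms), bw_phase = 0 and
 * phase_uf = 3 adds 30 us to ehci->bandwidth[3], [19], [35], ... all
 * the way through the table; calling again with sign < 0 adds the
 * negated values and restores the table exactly.
 */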

/*-------------------------------------------------------------------------*/

static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{
	struct ehci_per_sched	*ps;
	unsigned		uframe, uf, x;
	u8			*budget_line;

	if (!tt)
		return;
	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);

	/* Add up the contributions from all the endpoints using this TT */
	list_for_each_entry(ps, &tt->ps_list, ps_list) {
		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
				uframe += ps->bw_uperiod) {
			budget_line = &budget_table[uframe];
			x = ps->tt_usecs;

			/* propagate the time forward */
			for (uf = ps->phase_uf; uf < 8; ++uf) {
				x += budget_line[uf];

				/* Each microframe lasts 125 us */
				if (x <= 125) {
					budget_line[uf] = x;
					break;
				}
				budget_line[uf] = 125;
				x -= 125;
			}
		}
	}
}

static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
	unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;

	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}
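
/*
 * Example: smask = 0x04 (a single ssplit in H-frame uframe 2) gives
 * ffs(0x04) - 1 = 2, so the transfer starts in B-frame uframe 2.
 */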

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;

	for (i = 0; i < 7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
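
/*
 * Example: tt_usecs = { 190, 0, 0, ... } becomes { 125, 65, 0, ... },
 * the 65 us overflow spilling from uframe 0 into uframe 1.  Uframes 6
 * and 7 allow only 30 us and 0 us, so late overflow keeps spilling
 * forward until tt_available() can reject it against max_tt_usecs[7].
 */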

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available(
	struct ehci_hcd		*ehci,
	struct ehci_per_sched	*ps,
	struct ehci_tt		*tt,
	unsigned		frame,
	unsigned		uframe
)
{
	unsigned		period = ps->bw_period;
	unsigned		usecs = ps->tt_usecs;

	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
			frame += period) {
		unsigned	i, uf;
		unsigned short	tt_usecs[8];

		if (tt->bandwidth[frame] + usecs > 900)
			return 0;

		uf = frame << 3;
		for (i = 0; i < 8; (++i, ++uf))
			tt_usecs[i] = ehci->tt_budget[uf];

		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
			return 0;

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (usecs > 125) {
			int ufs = (usecs / 125);

			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (tt_usecs[i] > 0)
					return 0;
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7])
			return 0;
	}

	return 1;
}

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision(
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__hc32			type;
		struct ehci_qh_hw	*hw;

		here = ehci->pshadow[frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->ps.udev)) {
					u32		mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt(dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = hc32_to_cpu(ehci, here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			/* case Q_TYPE_FSTN: */
			default:
				ehci_dbg(ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{
	if (ehci->periodic_count++)
		return;

	/* Stop waiting to turn off the periodic schedule */
	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

	/* Don't start the schedule until PSS is 0 */
	ehci_poll_PSS(ehci);
	turn_on_io_watchdog(ehci);
}

static void disable_periodic(struct ehci_hcd *ehci)
{
	if (--ehci->periodic_count)
		return;

	/* Don't turn off the schedule until PSS is 1 */
	ehci_poll_PSS(ehci);
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->ps.period;

	dev_dbg(&qh->ps.udev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow[i];
		__hc32			*hw_p = &ehci->periodic[i];
		union ehci_shadow	here = *prev;
		__hc32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->ps.period > here.qh->ps.period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh->unlink_reason = 0;

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	list_add(&qh->intr_node, &ehci->intr_qh_list);

	/* maybe enable periodic schedule processing */
	++ehci->intr_count;
	enable_periodic(ehci);
}
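
/*
 * Example of the slow-->fast ordering above: a 32 ms qh links ahead
 * of a 4 ms qh in any frame they share, so each of the 32 ms branches
 * that lands on one of the 4 ms qh's frames reuses the same 4 ms node
 * instead of duplicating the rest of the chain.
 */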

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	/*
	 * If qh is for a low/full-speed device, simply unlinking it
	 * could interfere with an ongoing split transaction.  To unlink
	 * it safely would require setting the QH_INACTIVATE bit and
	 * waiting at least one frame, as described in EHCI 4.12.2.5.
	 *
	 * We won't bother with any of this.  Instead, we assume that the
	 * only reason for unlinking an interrupt QH while the current URB
	 * is still active is to dequeue all the URBs (flush the whole
	 * endpoint queue).
	 *
	 * If rebalancing the periodic schedule is ever implemented, this
	 * approach will no longer be valid.
	 */

	/* high bandwidth, or otherwise part of every microframe */
	period = qh->ps.period ? : 1;

	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
		periodic_unlink(ehci, i, qh);

	/* update per-qh bandwidth for debugfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
		: (qh->ps.usecs * 8);

	dev_dbg(&qh->ps.udev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->ps.period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;

	if (ehci->qh_scan_next == qh)
		ehci->qh_scan_next = list_entry(qh->intr_node.next,
				struct ehci_qh, intr_node);
	list_del(&qh->intr_node);
}

static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	if (qh->qh_state != QH_STATE_LINKED ||
			list_empty(&qh->unlink_node))
		return;

	list_del_init(&qh->unlink_node);

	/*
	 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
	 * avoiding unnecessary CPU wakeup
	 */
}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* If the QH isn't linked then there's nothing we can do. */
	if (qh->qh_state != QH_STATE_LINKED)
		return;

	/* if the qh is waiting for unlink, cancel it now */
	cancel_unlink_wait_intr(ehci, qh);

	qh_unlink_periodic(ehci, qh);

	/* Make sure the unlinks are visible before starting the timer */
	wmb();

	/*
	 * The EHCI spec doesn't say how long it takes the controller to
	 * stop accessing an unlinked interrupt QH.  The timer delay is
	 * 9 uframes; presumably that will be long enough.
	 */
	qh->unlink_cycle = ehci->intr_unlink_cycle;

	/* New entries go at the end of the intr_unlink list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);

	if (ehci->intr_unlinking)
		;	/* Avoid recursive calls */
	else if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_intr_unlinks(ehci);
	else if (ehci->intr_unlink.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
		++ehci->intr_unlink_cycle;
	}
}

/*
 * It is common for only one interrupt URB to be scheduled on a qh, and
 * complete() runs in tasklet context, so introduce a short delay here
 * to avoid unlinking the qh too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
				   struct ehci_qh *qh)
{
	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;

	/* New entries go at the end of the intr_unlink_wait list */
	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);

	if (ehci->rh_state < EHCI_RH_RUNNING)
		ehci_handle_start_intr_unlinks(ehci);
	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
		++ehci->intr_unlink_wait_cycle;
	}
}

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qh_hw	*hw = qh->hw;
	int			rc;

	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);

	if (!list_empty(&qh->qtd_list))
		qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
		rc = qh_schedule(ehci, qh);
		if (rc == 0) {
			qh_refresh(ehci, qh);
			qh_link_periodic(ehci, qh);
		}

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		else {
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
		}
	}

	/* maybe turn off periodic schedule */
	--ehci->intr_count;
	disable_periodic(ehci);
}

/*-------------------------------------------------------------------------*/

static int check_period(
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	uperiod,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - usecs;

	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}

	/* success! */
	return 1;
}
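
/*
 * Example, assuming uframe_periodic_max = 100: a request for 30 us
 * becomes the test "already claimed > 70 us?", so a uframe holding
 * 75 us fails while one holding exactly 70 us still fits.
 */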

static int check_intr_schedule(
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	struct ehci_qh		*qh,
	unsigned		*c_maskp,
	struct ehci_tt		*tt
)
{
	int		retval = -ENOSPC;
	u8		mask = 0;

	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
		goto done;

	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
		goto done;
	if (!qh->ps.c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
			if (!check_period(ehci, frame, i,
					qh->ps.bw_uperiod, qh->ps.c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = mask;
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = mask;

	mask |= 1 << uframe;
	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		if (!check_period(ehci, frame, uframe + qh->gap_uf,
				qh->ps.bw_uperiod, qh->ps.c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status = 0;
	unsigned	uframe;
	unsigned	c_mask;
	struct ehci_qh_hw	*hw = qh->hw;
	struct ehci_tt		*tt;

	hw->hw_next = EHCI_LIST_END(ehci);

	/* reuse the previous schedule slots, if we can */
	if (qh->ps.phase != NO_FRAME) {
		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
		return 0;
	}

	uframe = 0;
	c_mask = 0;
	tt = find_tt(qh->ps.udev);
	if (IS_ERR(tt)) {
		status = PTR_ERR(tt);
		goto done;
	}
	compute_tt_budget(ehci->tt_budget, tt);

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	/* "normal" case, uframing flexible except with splits */
	if (qh->ps.bw_period) {
		int		i;
		unsigned	frame;

		for (i = qh->ps.bw_period; i > 0; --i) {
			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule(ehci,
						frame, uframe, qh, &c_mask, tt);
				if (status == 0)
					goto got_it;
			}
		}

	/* qh->ps.bw_period == 0 means every uframe */
	} else {
		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
	}
	if (status)
		goto done;

 got_it:
	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
			(qh->ps.period - 1) : 0);
	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
	qh->ps.phase_uf = uframe;
	qh->ps.cs_mask = qh->ps.period ?
			(c_mask << 8) | (1 << uframe) :
			QH_SMASK;

	/* reset S-frame and (maybe) C-frame masks */
	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
	reserve_release_intr_bandwidth(ehci, qh, 1);

done:
	return status;
}

static int intr_submit(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	int			status;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&ehci->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(ehci, qh);
		if (status)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* stuff into the periodic schedule */
	if (qh->qh_state == QH_STATE_IDLE) {
		qh_refresh(ehci, qh);
		qh_link_periodic(ehci, qh);
	} else {
		/* cancel unlink wait for the qh */
		cancel_unlink_wait_intr(ehci, qh);
	}

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore(&ehci->lock, flags);
	if (status)
		qtd_list_free(ehci, urb, qtd_list);

	return status;
}

static void scan_intr(struct ehci_hcd *ehci)
{
	struct ehci_qh		*qh;

	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
			intr_node) {

		/* clean any finished work for this qh */
		if (!list_empty(&qh->qtd_list)) {
			int temp;

			/*
			 * Unlinks could happen here; completion reporting
			 * drops the lock.  That's why ehci->qh_scan_next
			 * always holds the next qh to scan; if the next qh
			 * gets unlinked then ehci->qh_scan_next is adjusted
			 * in qh_unlink_periodic().
			 */
			temp = qh_completions(ehci, qh);
			if (unlikely(temp))
				start_unlink_intr(ehci, qh);
			else if (unlikely(list_empty(&qh->qtd_list) &&
					qh->qh_state == QH_STATE_LINKED))
				start_unlink_intr_wait(ehci, qh);
		}
	}
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc(gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof(*stream), mem_flags);
	if (likely(stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = NO_FRAME;
		stream->ps.phase = NO_FRAME;
	}
	return stream;
}

static void
iso_stream_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	struct usb_device	*dev = urb->dev;
	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	unsigned		tmp;
	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacketSize field
	 */
	epnum = usb_pipeendpoint(urb->pipe);
	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
	maxp = usb_endpoint_maxp(&urb->ep->desc);
	buf1 = is_input ? 1 << 11 : 0;

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);

		stream->highspeed = 1;

		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->ps.usecs = HS_USECS_ISO(maxp);

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);

		stream->uperiod = urb->interval;
		stream->ps.period = urb->interval >> 3;
		stream->bandwidth = stream->ps.usecs * 8 /
				stream->ps.bw_uperiod;

	} else {
		u32		addr;
		int		think_time;
		int		hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->ps.usecs = HS_USECS_ISO(maxp);
		think_time = dev->tt->think_time;
		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
				dev->speed, is_input, 1, maxp));
		hs_transfers = max(1u, (maxp + 187) / 188);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->ps.c_usecs = stream->ps.usecs;
			stream->ps.usecs = HS_USECS_ISO(1);
			stream->ps.cs_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->ps.cs_mask |= tmp << (8 + 2);
		} else
			stream->ps.cs_mask = smask_out[hs_transfers - 1];

		/* period for bandwidth allocation */
		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
				1 << (urb->ep->desc.bInterval - 1));

		/* Allow urb->interval to override */
		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
		stream->ps.bw_uperiod = stream->ps.bw_period << 3;

		stream->ps.period = urb->interval;
		stream->uperiod = urb->interval << 3;
		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
				stream->ps.bw_period;

		/* stream->splits gets created from cs_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}

	stream->ps.udev = dev;
	stream->ps.ep = urb->ep;

	stream->bEndpointAddress = is_input | epnum;
	stream->maxp = maxp;
}
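
/*
 * Illustrative high-speed example: wMaxPacketSize 0x1400 decodes as
 * 1024 bytes with a high-bandwidth mult of 3, so maxp becomes 3072;
 * with bInterval 1 (and urb->interval 1) bw_uperiod is 1, and
 * stream->bandwidth reports usecs * 8, the average time tied up per
 * frame when every microframe carries a transfer.
 */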

static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint(urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave(&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely(stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely(stream != NULL)) {
			ep->hcpriv = stream;
			iso_stream_init(ehci, stream, urb);
		}

	/* if dev->ep[epnum] is a QH, hw is set */
	} else if (unlikely(stream->hw != NULL)) {
		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched	*iso_sched;
	int			size = sizeof(*iso_sched);

	size += packets * sizeof(struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely(iso_sched != NULL))
		INIT_LIST_HEAD(&iso_sched->td_list);

	return iso_sched;
}

static inline void
itd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->uperiod;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet[i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc[i].length;
		buf = dma + urb->iso_frame_desc[i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely(((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}

static void
iso_sched_free(
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	/* caller must hold ehci->lock! */
	list_splice(&iso_sched->td_list, &stream->free_list);
	kfree(iso_sched);
}

static int
itd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
	if (unlikely(sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave(&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/*
		 * Use iTDs from the free list, but not iTDs that may
		 * still be in use by the hardware.
		 */
		if (likely(!list_empty(&stream->free_list))) {
			itd = list_first_entry(&stream->free_list,
					struct ehci_itd, itd_list);
			if (itd->frame == ehci->now_frame)
				goto alloc_itd;
			list_del(&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
 alloc_itd:
			spin_unlock_irqrestore(&ehci->lock, flags);
			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave(&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset(itd, 0, sizeof(*itd));
		itd->itd_dma = itd_dma;
		itd->frame = NO_FRAME;
		list_add(&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore(&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}
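
/*
 * Example of the iTD count: urb->interval = 1 with 16 packets gives
 * span = 16 uframes and num_itds = 1 + (16 + 7) / 8 = 3; the extra
 * iTD allows the transfer to start partway into its first frame.
 */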

/*-------------------------------------------------------------------------*/

static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{
	unsigned		uframe;
	unsigned		i, j;
	unsigned		s_mask, c_mask, m;
	int			usecs = stream->ps.usecs;
	int			c_usecs = stream->ps.c_usecs;
	int			tt_usecs = stream->ps.tt_usecs;
	struct ehci_tt		*tt;

	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
		return;
	uframe = stream->ps.bw_phase << 3;

	bandwidth_dbg(ehci, sign, "iso", &stream->ps);

	if (sign < 0) {		/* Release bandwidth */
		usecs = -usecs;
		c_usecs = -c_usecs;
		tt_usecs = -tt_usecs;
	}

	if (!stream->splits) {		/* High speed */
		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod)
			ehci->bandwidth[i] += usecs;

	} else {			/* Full speed */
		s_mask = stream->ps.cs_mask;
		c_mask = s_mask >> 8;

		/* NOTE: adjustment needed for frame overflow */
		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
				i += stream->ps.bw_uperiod) {
			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
					(++j, m <<= 1)) {
				if (s_mask & m)
					ehci->bandwidth[i+j] += usecs;
				else if (c_mask & m)
					ehci->bandwidth[i+j] += c_usecs;
			}
		}

		tt = find_tt(stream->ps.udev);
		if (sign > 0)
			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
		else
			list_del(&stream->ps.ps_list);

		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
				i += stream->ps.bw_period)
			tt->bandwidth[i] += tt_usecs;
	}
}

static inline int
itd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe
)
{
	unsigned		usecs;

	/* convert "usecs we need" to "max already claimed" */
	usecs = ehci->uframe_periodic_max - stream->ps.usecs;

	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
			uframe += stream->ps.bw_uperiod) {
		if (ehci->bandwidth[uframe] > usecs)
			return 0;
	}
	return 1;
}

static inline int
sitd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe,
	struct ehci_iso_sched	*sched,
	struct ehci_tt		*tt
)
{
	unsigned		mask, tmp;
	unsigned		frame, uf;

	mask = stream->ps.cs_mask << (uframe & 7);

	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
		return 0;

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* check bandwidth */
	uframe &= stream->ps.bw_uperiod - 1;
	frame = uframe >> 3;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	/* The tt's fullspeed bus bandwidth must be available.
	 * tt_available scheduling guarantees 10+% for control/bulk.
	 */
	uf = uframe & 7;
	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
		return 0;
#else
	/* tt must be idle for start(s), any gap, and csplit.
	 * assume scheduling slop leaves 10+% for control/bulk.
	 */
	if (!tt_no_collision(ehci, stream->ps.bw_period,
			stream->ps.udev, frame, mask))
		return 0;
#endif

	do {
		unsigned	max_used;
		unsigned	i;

		/* check starts (OUT uses more than one) */
		uf = uframe;
		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (ehci->bandwidth[uf] > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->ps.c_usecs) {
			max_used = ehci->uframe_periodic_max -
					stream->ps.c_usecs;
			uf = uframe & ~7;
			tmp = 1 << (2+8);
			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
				if ((stream->ps.cs_mask & tmp) == 0)
					continue;
				if (ehci->bandwidth[uf+i] > max_used)
					return 0;
			}
		}

		uframe += stream->ps.bw_uperiod;
	} while (uframe < EHCI_BANDWIDTH_SIZE);

	stream->ps.cs_mask <<= uframe & 7;
	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
	return 1;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */
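
/*
 * For scale: the smallest standard EHCI frame list is 256 frames, so
 * the periodic schedule wraps every 256 ms; subtracting slop for irq
 * latency and the isochronous threshold is roughly where the ~230 msec
 * figure above comes from.
 */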

static int
iso_stream_schedule(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, base, next, start, period, span, now2;
	u32			wrap = 0, skip = 0;
	int			status = 0;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	bool			empty = list_empty(&stream->td_list);
	bool			new_stream = false;

	period = stream->uperiod;
	span = sched->span;
	if (!stream->highspeed)
		span <<= 3;

	/* Start a new isochronous stream? */
	if (unlikely(empty && !hcd_periodic_completion_in_progress(
			ehci_to_hcd(ehci), urb->ep))) {

		/* Schedule the endpoint */
		if (stream->ps.phase == NO_FRAME) {
			int		done = 0;
			struct ehci_tt	*tt = find_tt(stream->ps.udev);

			if (IS_ERR(tt)) {
				status = PTR_ERR(tt);
				goto fail;
			}
			compute_tt_budget(ehci->tt_budget, tt);

			start = ((-(++ehci->random_frame)) << 3) & (period - 1);

			/* find a uframe slot with enough bandwidth.
			 * Early uframes are more precious because full-speed
			 * iso IN transfers can't use late uframes,
			 * and therefore they should be allocated last.
			 */
			next = start;
			start += period;
			do {
				start--;
				/* check schedule: enough space? */
				if (stream->highspeed) {
					if (itd_slot_ok(ehci, stream, start))
						done = 1;
				} else {
					if ((start % 8) >= 6)
						continue;
					if (sitd_slot_ok(ehci, stream, start,
							sched, tt))
						done = 1;
				}
			} while (start > next && !done);

			/* no room in the schedule */
			if (!done) {
				ehci_dbg(ehci, "iso sched full %p", urb);
				status = -ENOSPC;
				goto fail;
			}
			stream->ps.phase = (start >> 3) &
					(stream->ps.period - 1);
			stream->ps.bw_phase = stream->ps.phase &
					(stream->ps.bw_period - 1);
			stream->ps.phase_uf = start & 7;
			reserve_release_iso_bandwidth(ehci, stream, 1);
		}

		/* New stream is already scheduled; use the upcoming slot */
		else {
			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
		}

		stream->next_uframe = start;
		new_stream = true;
	}

	now = ehci_read_frame_index(ehci) & (mod - 1);

	/* Take the isochronous scheduling threshold into account */
	if (ehci->i_thresh)
		next = now + ehci->i_thresh;	/* uframe cache */
	else
		next = (now + 2 + 7) & ~0x07;	/* full frame cache */

	/* If needed, initialize last_iso_frame so that this URB will be seen */
	if (ehci->isoc_count == 0)
		ehci->last_iso_frame = now >> 3;

	/*
	 * Use ehci->last_iso_frame as the base.  There can't be any
	 * TDs scheduled for earlier than that.
	 */
	base = ehci->last_iso_frame << 3;
	next = (next - base) & (mod - 1);
	start = (stream->next_uframe - base) & (mod - 1);

	if (unlikely(new_stream))
		goto do_ASAP;

	/*
	 * Typical case: reuse current schedule, stream may still be active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc).  If there are, the behavior depends on
	 * whether URB_ISO_ASAP is set.
	 */
	now2 = (now - base) & (mod - 1);

	/* Is the schedule about to wrap around? */
	if (unlikely(!empty && start < period)) {
		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
				urb, stream->next_uframe, base, period, mod);
		status = -EFBIG;
		goto fail;
	}

	/* Is the next packet scheduled after the base time? */
	if (likely(!empty || start <= now2 + period)) {

		/* URB_ISO_ASAP: make sure that start >= next */
		if (unlikely(start < next &&
				(urb->transfer_flags & URB_ISO_ASAP)))
			goto do_ASAP;

		/* Otherwise use start, if it's not in the past */
		if (likely(start >= now2))
			goto use_start;

	/* Otherwise we got an underrun while the queue was empty */
	} else {
		if (urb->transfer_flags & URB_ISO_ASAP)
			goto do_ASAP;
		wrap = mod;
		now2 += mod;
	}

	/* How many uframes and packets do we need to skip? */
	skip = (now2 - start + period - 1) & -period;
	if (skip >= span) {		/* Entirely in the past? */
		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
				urb, start + base, span - period, now2 + base,
				base);

		/* Try to keep the last TD intact for scanning later */
		skip = span - period;

		/* Will it come before the current scan position? */
		if (empty) {
			skip = span;	/* Skip the entire URB */
			status = 1;	/* and give it back immediately */
			iso_sched_free(stream, sched);
			sched = NULL;
		}
	}
	urb->error_count = skip / period;
	if (sched)
		sched->first_packet = urb->error_count;
	goto use_start;

 do_ASAP:
	/* Use the first slot after "next" */
	start = next + ((start - next) & (period - 1));

 use_start:
	/* Tried to schedule too far into the future? */
	if (unlikely(start + span - period >= mod + wrap)) {
		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
				urb, start, span - period, mod + wrap);
		status = -EFBIG;
		goto fail;
	}

	start += base;
	stream->next_uframe = (start + skip) & (mod - 1);

	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = start & (mod - 1);
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return status;

 fail:
	iso_sched_free(stream, sched);
	urb->hcpriv = NULL;
	return status;
}

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp[0] = stream->buf0;
	itd->hw_bufp[1] = stream->buf1;
	itd->hw_bufp[2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
	unsigned		pg = itd->pg;

	/* BUG_ON(pg == 6 && uf->cross); */

	uframe &= 0x07;
	itd->index[uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely(uf->cross)) {
		u64	bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
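
/*
 * Example of the page-cross fixup above: a 512-byte packet whose
 * buffer starts at offset 0xf80 of a page ends at 0x180 of the next,
 * so itd_sched_init() set uf->cross and the second half is addressed
 * through the following buffer-pointer page (pg is bumped here).
 */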
1721
1722static inline void
1723itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1724{
1725	union ehci_shadow	*prev = &ehci->pshadow[frame];
1726	__hc32			*hw_p = &ehci->periodic[frame];
1727	union ehci_shadow	here = *prev;
1728	__hc32			type = 0;
1729
1730	/* skip any iso nodes which might belong to previous microframes */
1731	while (here.ptr) {
1732		type = Q_NEXT_TYPE(ehci, *hw_p);
1733		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1734			break;
1735		prev = periodic_next_shadow(ehci, prev, type);
1736		hw_p = shadow_next_periodic(ehci, &here, type);
1737		here = *prev;
1738	}
1739
1740	itd->itd_next = here;
1741	itd->hw_next = *hw_p;
1742	prev->itd = itd;
1743	itd->frame = frame;
1744	wmb();
1745	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1746}
1747
1748/* fit urb's itds into the selected schedule slot; activate as needed */
1749static void itd_link_urb(
1750	struct ehci_hcd		*ehci,
1751	struct urb		*urb,
1752	unsigned		mod,
1753	struct ehci_iso_stream	*stream
1754)
1755{
1756	int			packet;
1757	unsigned		next_uframe, uframe, frame;
1758	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
1759	struct ehci_itd		*itd;
1760
1761	next_uframe = stream->next_uframe & (mod - 1);
1762
1763	if (unlikely(list_empty(&stream->td_list)))
1764		ehci_to_hcd(ehci)->self.bandwidth_allocated
1765				+= stream->bandwidth;
1766
1767	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1768		if (ehci->amd_pll_fix == 1)
1769			usb_amd_quirk_pll_disable();
1770	}
1771
1772	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1773
1774	/* fill iTDs uframe by uframe */
1775	for (packet = iso_sched->first_packet, itd = NULL;
1776			packet < urb->number_of_packets;) {
1777		if (itd == NULL) {
1778			/* ASSERT:  we have all necessary itds */
1779			/* BUG_ON(list_empty(&iso_sched->td_list)); */
1780
1781			/* ASSERT:  no itds for this endpoint in this uframe */
1782
1783			itd = list_entry(iso_sched->td_list.next,
1784					struct ehci_itd, itd_list);
1785			list_move_tail(&itd->itd_list, &stream->td_list);
1786			itd->stream = stream;
1787			itd->urb = urb;
1788			itd_init(ehci, stream, itd);
1789		}
1790
1791		uframe = next_uframe & 0x07;
1792		frame = next_uframe >> 3;
1793
1794		itd_patch(ehci, itd, iso_sched, packet, uframe);
1795
1796		next_uframe += stream->uperiod;
1797		next_uframe &= mod - 1;
1798		packet++;
1799
1800		/* link completed itds into the schedule */
1801		if (((next_uframe >> 3) != frame)
1802				|| packet == urb->number_of_packets) {
1803			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1804			itd = NULL;
1805		}
1806	}
1807	stream->next_uframe = next_uframe;
1808
1809	/* don't need that schedule data any more */
1810	iso_sched_free(stream, iso_sched);
1811	urb->hcpriv = stream;
1812
1813	++ehci->isoc_count;
1814	enable_periodic(ehci);
1815}
1816
1817#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1818
1819/* Process and recycle a completed ITD.  Return true iff its urb completed,
1820 * and hence its completion callback probably added things to the hardware
1821 * schedule.
1822 *
1823 * Note that we carefully avoid recycling this descriptor until after any
1824 * completion callback runs, so that it won't be reused quickly.  That is,
1825 * assuming (a) no more than two urbs per frame on this endpoint, and also
1826 * (b) only this endpoint's completions submit URBs.  It seems some silicon
1827 * corrupts things if you reuse completed descriptors very quickly...
1828 */
1829static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1830{
1831	struct urb				*urb = itd->urb;
1832	struct usb_iso_packet_descriptor	*desc;
1833	u32					t;
1834	unsigned				uframe;
1835	int					urb_index = -1;
1836	struct ehci_iso_stream			*stream = itd->stream;
1837	struct usb_device			*dev;
1838	bool					retval = false;
1839
1840	/* for each uframe with a packet */
1841	for (uframe = 0; uframe < 8; uframe++) {
1842		if (likely(itd->index[uframe] == -1))
1843			continue;
1844		urb_index = itd->index[uframe];
1845		desc = &urb->iso_frame_desc[urb_index];
1846
1847		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
1848		itd->hw_transaction[uframe] = 0;
1849
1850		/* report transfer status */
1851		if (unlikely(t & ISO_ERRS)) {
1852			urb->error_count++;
1853			if (t & EHCI_ISOC_BUF_ERR)
1854				desc->status = usb_pipein(urb->pipe)
1855					? -ENOSR  /* hc couldn't read */
1856					: -ECOMM; /* hc couldn't write */
1857			else if (t & EHCI_ISOC_BABBLE)
1858				desc->status = -EOVERFLOW;
1859			else /* (t & EHCI_ISOC_XACTERR) */
1860				desc->status = -EPROTO;
1861
1862			/* HC need not update length with this error */
1863			if (!(t & EHCI_ISOC_BABBLE)) {
1864				desc->actual_length = EHCI_ITD_LENGTH(t);
1865				urb->actual_length += desc->actual_length;
1866			}
1867		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
1868			desc->status = 0;
1869			desc->actual_length = EHCI_ITD_LENGTH(t);
1870			urb->actual_length += desc->actual_length;
1871		} else {
1872			/* URB was too late */
1873			urb->error_count++;
1874		}
1875	}
1876
1877	/* handle completion now? */
1878	if (likely((urb_index + 1) != urb->number_of_packets))
1879		goto done;
1880
1881	/*
1882	 * ASSERT: it's really the last itd for this urb
1883	 * list_for_each_entry (itd, &stream->td_list, itd_list)
1884	 *	 BUG_ON(itd->urb == urb);
1885	 */
1886
1887	/* give urb back to the driver; completion often (re)submits */
1888	dev = urb->dev;
1889	ehci_urb_done(ehci, urb, 0);
1890	retval = true;
1891	urb = NULL;
1892
1893	--ehci->isoc_count;
1894	disable_periodic(ehci);
1895
1896	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1897	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1898		if (ehci->amd_pll_fix == 1)
1899			usb_amd_quirk_pll_enable();
1900	}
1901
1902	if (unlikely(list_is_singular(&stream->td_list)))
1903		ehci_to_hcd(ehci)->self.bandwidth_allocated
1904				-= stream->bandwidth;
1905
1906done:
1907	itd->urb = NULL;
1908
1909	/* Add to the end of the free list for later reuse */
1910	list_move_tail(&itd->itd_list, &stream->free_list);
1911
1912	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1913	if (list_empty(&stream->td_list)) {
1914		list_splice_tail_init(&stream->free_list,
1915				&ehci->cached_itd_list);
1916		start_free_itds(ehci);
1917	}
1918
1919	return retval;
1920}
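/*
 * Editorial sketch, not part of the driver: the error mapping performed in
 * itd_complete() above, restated as a table.  The bit macros here are
 * hypothetical local stand-ins, not the driver's EHCI_ISOC_* definitions;
 * only the errno choices mirror the code.
 */
#if 0	/* standalone illustration; compile separately */
#include <errno.h>
#include <stdbool.h>

#define SK_BUF_ERR	0x1	/* HC buffer underran (OUT) or overran (IN) */
#define SK_BABBLE	0x2	/* device sent more data than expected */
#define SK_XACTERR	0x4	/* CRC, timeout, bad PID, ... */

static int itd_status_to_errno(unsigned t, bool is_in)
{
	if (t & SK_BUF_ERR)
		return is_in ? -ENOSR : -ECOMM;
	if (t & SK_BABBLE)
		return -EOVERFLOW;
	if (t & SK_XACTERR)
		return -EPROTO;
	return 0;		/* transaction completed normally */
}
#endif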
1921
1922/*-------------------------------------------------------------------------*/
1923
1924static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
1925	gfp_t mem_flags)
1926{
1927	int			status = -EINVAL;
1928	unsigned long		flags;
1929	struct ehci_iso_stream	*stream;
1930
1931	/* Get iso_stream head */
1932	stream = iso_stream_find(ehci, urb);
1933	if (unlikely(stream == NULL)) {
1934		ehci_dbg(ehci, "can't get iso stream\n");
1935		return -ENOMEM;
1936	}
1937	if (unlikely(urb->interval != stream->uperiod)) {
1938		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
1939			stream->uperiod, urb->interval);
1940		goto done;
1941	}
1942
1943#ifdef EHCI_URB_TRACE
1944	ehci_dbg(ehci,
1945		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1946		__func__, urb->dev->devpath, urb,
1947		usb_pipeendpoint(urb->pipe),
1948		usb_pipein(urb->pipe) ? "in" : "out",
1949		urb->transfer_buffer_length,
1950		urb->number_of_packets, urb->interval,
1951		stream);
1952#endif
1953
1954	/* allocate ITDs w/o locking anything */
1955	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
1956	if (unlikely(status < 0)) {
1957		ehci_dbg(ehci, "can't init itds\n");
1958		goto done;
1959	}
1960
1961	/* schedule ... need to lock */
1962	spin_lock_irqsave(&ehci->lock, flags);
1963	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1964		status = -ESHUTDOWN;
1965		goto done_not_linked;
1966	}
1967	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1968	if (unlikely(status))
1969		goto done_not_linked;
1970	status = iso_stream_schedule(ehci, urb, stream);
1971	if (likely(status == 0)) {
1972		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
1973	} else if (status > 0) {
1974		status = 0;
1975		ehci_urb_done(ehci, urb, 0);
1976	} else {
1977		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1978	}
1979 done_not_linked:
1980	spin_unlock_irqrestore(&ehci->lock, flags);
1981 done:
1982	return status;
1983}
1984
1985/*-------------------------------------------------------------------------*/
1986
1987/*
1988 * "Split ISO TDs" ... used for USB 1.1 devices going through the
1989 * TTs in USB 2.0 hubs.  These need microframe scheduling.
1990 */
1991
1992static inline void
1993sitd_sched_init(
1994	struct ehci_hcd		*ehci,
1995	struct ehci_iso_sched	*iso_sched,
1996	struct ehci_iso_stream	*stream,
1997	struct urb		*urb
1998)
1999{
2000	unsigned	i;
2001	dma_addr_t	dma = urb->transfer_dma;
2002
2003	/* how many frames are needed for these transfers */
2004	iso_sched->span = urb->number_of_packets * stream->ps.period;
2005
2006	/* figure out per-frame sitd fields that we'll need later
2007	 * when we fit new sitds into the schedule.
2008	 */
2009	for (i = 0; i < urb->number_of_packets; i++) {
2010		struct ehci_iso_packet	*packet = &iso_sched->packet[i];
2011		unsigned		length;
2012		dma_addr_t		buf;
2013		u32			trans;
2014
2015		length = urb->iso_frame_desc[i].length & 0x03ff;
2016		buf = dma + urb->iso_frame_desc[i].offset;
2017
2018		trans = SITD_STS_ACTIVE;
2019		if (((i + 1) == urb->number_of_packets)
2020				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
2021			trans |= SITD_IOC;
2022		trans |= length << 16;
2023		packet->transaction = cpu_to_hc32(ehci, trans);
2024
2025		/* might need to cross a buffer page within a td */
2026		packet->bufp = buf;
2027		packet->buf1 = (buf + length) & ~0x0fff;
2028		if (packet->buf1 != (buf & ~(u64)0x0fff))
2029			packet->cross = 1;
2030
2031		/* OUT uses multiple start-splits */
2032		if (stream->bEndpointAddress & USB_DIR_IN)
2033			continue;
2034		length = (length + 187) / 188;
2035		if (length > 1) /* BEGIN vs ALL */
2036			length |= 1 << 3;
2037		packet->buf1 |= length;
2038	}
2039}
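/*
 * Editorial sketch, not part of the driver: the "(length + 187) / 188"
 * rounding above computes how many start-splits an OUT payload needs,
 * since each SSPLIT ferries at most 188 bytes to the TT (the full-speed
 * data that fits in one microframe).  A count of 1 means a single "ALL"
 * start-split; more than one sets the "BEGIN" transaction-position bit.
 */
#if 0	/* standalone illustration; compile separately */
#include <stdio.h>

static unsigned out_start_splits(unsigned length)
{
	return (length + 187) / 188;
}

int main(void)
{
	printf("%u %u %u\n", out_start_splits(64),
			out_start_splits(188), out_start_splits(400));
	return 0;	/* prints: 1 1 3 */
}
#endif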
2040
2041static int
2042sitd_urb_transaction(
2043	struct ehci_iso_stream	*stream,
2044	struct ehci_hcd		*ehci,
2045	struct urb		*urb,
2046	gfp_t			mem_flags
2047)
2048{
2049	struct ehci_sitd	*sitd;
2050	dma_addr_t		sitd_dma;
2051	int			i;
2052	struct ehci_iso_sched	*iso_sched;
2053	unsigned long		flags;
2054
2055	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
2056	if (iso_sched == NULL)
2057		return -ENOMEM;
2058
2059	sitd_sched_init(ehci, iso_sched, stream, urb);
2060
2061	/* allocate/init sITDs */
2062	spin_lock_irqsave(&ehci->lock, flags);
2063	for (i = 0; i < urb->number_of_packets; i++) {
2064
2065		/* NOTE:  for now, we don't try to handle wraparound cases
2066		 * for IN (using sitd->hw_backpointer, like a FSTN), which
2067		 * means we never need two sitds for full speed packets.
2068		 */
2069
2070		/*
2071		 * Use siTDs from the free list, but not siTDs that may
2072		 * still be in use by the hardware.
2073		 */
2074		if (likely(!list_empty(&stream->free_list))) {
2075			sitd = list_first_entry(&stream->free_list,
2076					 struct ehci_sitd, sitd_list);
2077			if (sitd->frame == ehci->now_frame)
2078				goto alloc_sitd;
2079			list_del(&sitd->sitd_list);
2080			sitd_dma = sitd->sitd_dma;
2081		} else {
2082 alloc_sitd:
2083			spin_unlock_irqrestore(&ehci->lock, flags);
2084			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
2085					&sitd_dma);
2086			spin_lock_irqsave(&ehci->lock, flags);
2087			if (!sitd) {
2088				iso_sched_free(stream, iso_sched);
2089				spin_unlock_irqrestore(&ehci->lock, flags);
2090				return -ENOMEM;
2091			}
2092		}
2093
2094		memset(sitd, 0, sizeof(*sitd));
2095		sitd->sitd_dma = sitd_dma;
2096		sitd->frame = NO_FRAME;
2097		list_add(&sitd->sitd_list, &iso_sched->td_list);
2098	}
2099
2100	/* temporarily store schedule info in hcpriv */
2101	urb->hcpriv = iso_sched;
2102	urb->error_count = 0;
2103
2104	spin_unlock_irqrestore(&ehci->lock, flags);
2105	return 0;
2106}
2107
2108/*-------------------------------------------------------------------------*/
2109
2110static inline void
2111sitd_patch(
2112	struct ehci_hcd		*ehci,
2113	struct ehci_iso_stream	*stream,
2114	struct ehci_sitd	*sitd,
2115	struct ehci_iso_sched	*iso_sched,
2116	unsigned		index
2117)
2118{
2119	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
2120	u64			bufp;
2121
2122	sitd->hw_next = EHCI_LIST_END(ehci);
2123	sitd->hw_fullspeed_ep = stream->address;
2124	sitd->hw_uframe = stream->splits;
2125	sitd->hw_results = uf->transaction;
2126	sitd->hw_backpointer = EHCI_LIST_END(ehci);
2127
2128	bufp = uf->bufp;
2129	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
2130	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
2131
2132	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
2133	if (uf->cross)
2134		bufp += 4096;
2135	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
2136	sitd->index = index;
2137}
2138
2139static inline void
2140sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
2141{
2142	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
2143	sitd->sitd_next = ehci->pshadow[frame];
2144	sitd->hw_next = ehci->periodic[frame];
2145	ehci->pshadow[frame].sitd = sitd;
2146	sitd->frame = frame;
2147	wmb();
2148	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
2149}
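/*
 * Editorial sketch, not part of the driver: sitd_link() above uses the
 * classic publish pattern -- fill in the descriptor, wmb(), then store the
 * frame-list entry that makes it visible to the controller, so the HC can
 * never follow a link into a half-written sITD.  In portable C11 the same
 * shape is (roughly) a release store; the list type here is made up.
 */
#if 0	/* standalone illustration; compile separately */
#include <stdatomic.h>

struct node {
	int payload;
	struct node *next;
};

static void publish(_Atomic(struct node *) *head, struct node *n, int v)
{
	n->payload = v;			/* initialize first ... */
	n->next = atomic_load_explicit(head, memory_order_relaxed);
	/* ... then make it reachable, ordering the writes above before it */
	atomic_store_explicit(head, n, memory_order_release);
}
#endif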
2150
2151/* fit urb's sitds into the selected schedule slot; activate as needed */
2152static void sitd_link_urb(
2153	struct ehci_hcd		*ehci,
2154	struct urb		*urb,
2155	unsigned		mod,
2156	struct ehci_iso_stream	*stream
2157)
2158{
2159	int			packet;
2160	unsigned		next_uframe;
2161	struct ehci_iso_sched	*sched = urb->hcpriv;
2162	struct ehci_sitd	*sitd;
2163
2164	next_uframe = stream->next_uframe;
2165
2166	if (list_empty(&stream->td_list))
2167		/* usbfs ignores TT bandwidth */
2168		ehci_to_hcd(ehci)->self.bandwidth_allocated
2169				+= stream->bandwidth;
2170
2171	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2172		if (ehci->amd_pll_fix == 1)
2173			usb_amd_quirk_pll_disable();
2174	}
2175
2176	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2177
2178	/* fill sITDs frame by frame */
2179	for (packet = sched->first_packet, sitd = NULL;
2180			packet < urb->number_of_packets;
2181			packet++) {
2182
2183		/* ASSERT:  we have all necessary sitds */
2184		BUG_ON(list_empty(&sched->td_list));
2185
2186		/* ASSERT:  no sitds for this endpoint in this frame */
2187
2188		sitd = list_entry(sched->td_list.next,
2189				struct ehci_sitd, sitd_list);
2190		list_move_tail(&sitd->sitd_list, &stream->td_list);
2191		sitd->stream = stream;
2192		sitd->urb = urb;
2193
2194		sitd_patch(ehci, stream, sitd, sched, packet);
2195		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2196				sitd);
2197
2198		next_uframe += stream->uperiod;
2199	}
2200	stream->next_uframe = next_uframe & (mod - 1);
2201
2202	/* don't need that schedule data any more */
2203	iso_sched_free(stream, sched);
2204	urb->hcpriv = stream;
2205
2206	++ehci->isoc_count;
2207	enable_periodic(ehci);
2208}
2209
2210/*-------------------------------------------------------------------------*/
2211
2212#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2213				| SITD_STS_XACT | SITD_STS_MMF)
2214
2215/* Process and recycle a completed SITD.  Return true iff its urb completed,
2216 * and hence its completion callback probably added things to the hardware
2217 * schedule.
2218 *
2219 * Note that we carefully avoid recycling this descriptor until after any
2220 * completion callback runs, so that it won't be reused too quickly; this
2221 * assumes (a) no more than two urbs per frame on this endpoint, and also
2222 * (b) that only this endpoint's completions submit URBs.  It seems some
2223 * silicon corrupts things if you reuse completed descriptors very quickly...
2224 */
2225static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2226{
2227	struct urb				*urb = sitd->urb;
2228	struct usb_iso_packet_descriptor	*desc;
2229	u32					t;
2230	int					urb_index;
2231	struct ehci_iso_stream			*stream = sitd->stream;
2232	struct usb_device			*dev;
2233	bool					retval = false;
2234
2235	urb_index = sitd->index;
2236	desc = &urb->iso_frame_desc[urb_index];
2237	t = hc32_to_cpup(ehci, &sitd->hw_results);
2238
2239	/* report transfer status */
2240	if (unlikely(t & SITD_ERRS)) {
2241		urb->error_count++;
2242		if (t & SITD_STS_DBE)
2243			desc->status = usb_pipein(urb->pipe)
2244				? -ENOSR  /* hc couldn't read */
2245				: -ECOMM; /* hc couldn't write */
2246		else if (t & SITD_STS_BABBLE)
2247			desc->status = -EOVERFLOW;
2248		else /* XACT, MMF, etc */
2249			desc->status = -EPROTO;
2250	} else if (unlikely(t & SITD_STS_ACTIVE)) {
2251		/* URB was too late */
2252		urb->error_count++;
2253	} else {
2254		desc->status = 0;
2255		desc->actual_length = desc->length - SITD_LENGTH(t);
2256		urb->actual_length += desc->actual_length;
2257	}
2258
2259	/* handle completion now? */
2260	if ((urb_index + 1) != urb->number_of_packets)
2261		goto done;
2262
2263	/*
2264	 * ASSERT: it's really the last sitd for this urb
2265	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
2266	 *	 BUG_ON(sitd->urb == urb);
2267	 */
2268
2269	/* give urb back to the driver; completion often (re)submits */
2270	dev = urb->dev;
2271	ehci_urb_done(ehci, urb, 0);
2272	retval = true;
2273	urb = NULL;
2274
2275	--ehci->isoc_count;
2276	disable_periodic(ehci);
2277
2278	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2279	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2280		if (ehci->amd_pll_fix == 1)
2281			usb_amd_quirk_pll_enable();
2282	}
2283
2284	if (list_is_singular(&stream->td_list))
2285		ehci_to_hcd(ehci)->self.bandwidth_allocated
2286				-= stream->bandwidth;
2287
2288done:
2289	sitd->urb = NULL;
2290
2291	/* Add to the end of the free list for later reuse */
2292	list_move_tail(&sitd->sitd_list, &stream->free_list);
2293
2294	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
2295	if (list_empty(&stream->td_list)) {
2296		list_splice_tail_init(&stream->free_list,
2297				&ehci->cached_sitd_list);
2298		start_free_itds(ehci);
2299	}
2300
2301	return retval;
2302}
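/*
 * Editorial note, not part of the driver: observe the asymmetry with
 * itd_complete() above.  The iTD length field reports bytes transferred,
 * while the siTD field counts down from the request size, so the siTD
 * path subtracts the residue:
 *
 *	itd:	desc->actual_length = EHCI_ITD_LENGTH(t);
 *	sitd:	desc->actual_length = desc->length - SITD_LENGTH(t);
 */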
2303
2304
2305static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
2306	gfp_t mem_flags)
2307{
2308	int			status = -EINVAL;
2309	unsigned long		flags;
2310	struct ehci_iso_stream	*stream;
2311
2312	/* Get iso_stream head */
2313	stream = iso_stream_find(ehci, urb);
2314	if (stream == NULL) {
2315		ehci_dbg(ehci, "can't get iso stream\n");
2316		return -ENOMEM;
2317	}
2318	if (urb->interval != stream->ps.period) {
2319		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
2320			stream->ps.period, urb->interval);
2321		goto done;
2322	}
2323
2324#ifdef EHCI_URB_TRACE
2325	ehci_dbg(ehci,
2326		"submit %p dev%s ep%d%s-iso len %d\n",
2327		urb, urb->dev->devpath,
2328		usb_pipeendpoint(urb->pipe),
2329		usb_pipein(urb->pipe) ? "in" : "out",
2330		urb->transfer_buffer_length);
2331#endif
2332
2333	/* allocate SITDs */
2334	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
2335	if (status < 0) {
2336		ehci_dbg(ehci, "can't init sitds\n");
2337		goto done;
2338	}
2339
2340	/* schedule ... need to lock */
2341	spin_lock_irqsave(&ehci->lock, flags);
2342	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2343		status = -ESHUTDOWN;
2344		goto done_not_linked;
2345	}
2346	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2347	if (unlikely(status))
2348		goto done_not_linked;
2349	status = iso_stream_schedule(ehci, urb, stream);
2350	if (likely(status == 0)) {
2351		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
2352	} else if (status > 0) {
2353		status = 0;
2354		ehci_urb_done(ehci, urb, 0);
2355	} else {
2356		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2357	}
2358 done_not_linked:
2359	spin_unlock_irqrestore(&ehci->lock, flags);
2360 done:
2361	return status;
2362}
2363
2364/*-------------------------------------------------------------------------*/
2365
2366static void scan_isoc(struct ehci_hcd *ehci)
2367{
2368	unsigned		uf, now_frame, frame;
2369	unsigned		fmask = ehci->periodic_size - 1;
2370	bool			modified, live;
2371	union ehci_shadow	q, *q_p;
2372	__hc32			type, *hw_p;
2373
2374	/*
2375	 * When running, scan from last scan point up to "now"
2376	 * else clean up by scanning everything that's left.
2377	 * Touches as few pages as possible:  cache-friendly.
2378	 */
2379	if (ehci->rh_state >= EHCI_RH_RUNNING) {
2380		uf = ehci_read_frame_index(ehci);
2381		now_frame = (uf >> 3) & fmask;
2382		live = true;
2383	} else  {
2384		now_frame = (ehci->last_iso_frame - 1) & fmask;
2385		live = false;
2386	}
2387	ehci->now_frame = now_frame;
2388
2389	frame = ehci->last_iso_frame;
2390
2391restart:
2392	/* Scan each element in frame's queue for completions */
2393	q_p = &ehci->pshadow[frame];
2394	hw_p = &ehci->periodic[frame];
2395	q.ptr = q_p->ptr;
2396	type = Q_NEXT_TYPE(ehci, *hw_p);
2397	modified = false;
2398
2399	while (q.ptr != NULL) {
2400		switch (hc32_to_cpu(ehci, type)) {
2401		case Q_TYPE_ITD:
2402			/*
2403			 * If this ITD is still active, leave it for
2404			 * later processing ... check the next entry.
2405			 * No need to check for activity unless the
2406			 * frame is current.
2407			 */
2408			if (frame == now_frame && live) {
2409				rmb();
2410				for (uf = 0; uf < 8; uf++) {
2411					if (q.itd->hw_transaction[uf] &
2412							ITD_ACTIVE(ehci))
2413						break;
2414				}
2415				if (uf < 8) {
2416					q_p = &q.itd->itd_next;
2417					hw_p = &q.itd->hw_next;
2418					type = Q_NEXT_TYPE(ehci,
2419							q.itd->hw_next);
2420					q = *q_p;
2421					break;
2422				}
2423			}
2424
2425			/*
2426			 * Take finished ITDs out of the schedule
2427			 * and process them:  recycle, maybe report
2428			 * URB completion.  HC won't cache the
2429			 * pointer for much longer, if at all.
2430			 */
2431			*q_p = q.itd->itd_next;
2432			if (!ehci->use_dummy_qh ||
2433					q.itd->hw_next != EHCI_LIST_END(ehci))
2434				*hw_p = q.itd->hw_next;
2435			else
2436				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2437			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2438			wmb();
2439			modified = itd_complete(ehci, q.itd);
2440			q = *q_p;
2441			break;
2442		case Q_TYPE_SITD:
2443			/*
2444			 * If this SITD is still active, leave it for
2445			 * later processing ... check the next entry.
2446			 * No need to check for activity unless the
2447			 * frame is current.
2448			 */
2449			if (((frame == now_frame) ||
2450					(((frame + 1) & fmask) == now_frame))
2451				&& live
2452				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
2453
2454				q_p = &q.sitd->sitd_next;
2455				hw_p = &q.sitd->hw_next;
2456				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2457				q = *q_p;
2458				break;
2459			}
2460
2461			/*
2462			 * Take finished SITDs out of the schedule
2463			 * and process them:  recycle, maybe report
2464			 * URB completion.
2465			 */
2466			*q_p = q.sitd->sitd_next;
2467			if (!ehci->use_dummy_qh ||
2468					q.sitd->hw_next != EHCI_LIST_END(ehci))
2469				*hw_p = q.sitd->hw_next;
2470			else
2471				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2472			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2473			wmb();
2474			modified = sitd_complete(ehci, q.sitd);
2475			q = *q_p;
2476			break;
2477		default:
2478			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2479					type, frame, q.ptr);
2480			/* BUG(); */
2481			/* FALL THROUGH */
2482		case Q_TYPE_QH:
2483		case Q_TYPE_FSTN:
2484			/* End of the iTDs and siTDs */
2485			q.ptr = NULL;
2486			break;
2487		}
2488
2489		/* Assume completion callbacks modify the queue */
2490		if (unlikely(modified && ehci->isoc_count > 0))
2491			goto restart;
2492	}
2493
2494	/* Stop when we have reached the current frame */
2495	if (frame == now_frame)
2496		return;
2497
2498	/* The last frame may still have active siTDs */
2499	ehci->last_iso_frame = frame;
2500	frame = (frame + 1) & fmask;
2501
2502	goto restart;
2503}
v5.9
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright (c) 2001-2004 by David Brownell
   4 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
   5 */
   6
   7/* this file is part of ehci-hcd.c */
   8
   9/*-------------------------------------------------------------------------*/
  10
  11/*
  12 * EHCI scheduled transaction support:  interrupt, iso, split iso
  13 * These are called "periodic" transactions in the EHCI spec.
  14 *
  15 * Note that for interrupt transfers, the QH/QTD manipulation is shared
  16 * with the "asynchronous" transaction support (control/bulk transfers).
  17 * The only real difference is in how interrupt transfers are scheduled.
  18 *
  19 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
  20 * It keeps track of every ITD (or SITD) that's linked, and holds enough
  21 * pre-calculated schedule data to make appending to the queue be quick.
  22 */
  23
  24static int ehci_get_frame(struct usb_hcd *hcd);
  25
  26/*
  27 * periodic_next_shadow - return "next" pointer on shadow list
  28 * @periodic: host pointer to qh/itd/sitd
  29 * @tag: hardware tag for type of this record
  30 */
  31static union ehci_shadow *
  32periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
  33		__hc32 tag)
  34{
  35	switch (hc32_to_cpu(ehci, tag)) {
  36	case Q_TYPE_QH:
  37		return &periodic->qh->qh_next;
  38	case Q_TYPE_FSTN:
  39		return &periodic->fstn->fstn_next;
  40	case Q_TYPE_ITD:
  41		return &periodic->itd->itd_next;
  42	/* case Q_TYPE_SITD: */
  43	default:
  44		return &periodic->sitd->sitd_next;
  45	}
  46}
  47
  48static __hc32 *
  49shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
  50		__hc32 tag)
  51{
  52	switch (hc32_to_cpu(ehci, tag)) {
  53	/* our ehci_shadow.qh is actually software part */
  54	case Q_TYPE_QH:
  55		return &periodic->qh->hw->hw_next;
  56	/* others are hw parts */
  57	default:
  58		return periodic->hw_next;
  59	}
  60}
  61
  62/* caller must hold ehci->lock */
  63static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
  64{
  65	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
  66	__hc32			*hw_p = &ehci->periodic[frame];
  67	union ehci_shadow	here = *prev_p;
  68
  69	/* find predecessor of "ptr"; hw and shadow lists are in sync */
  70	while (here.ptr && here.ptr != ptr) {
  71		prev_p = periodic_next_shadow(ehci, prev_p,
  72				Q_NEXT_TYPE(ehci, *hw_p));
  73		hw_p = shadow_next_periodic(ehci, &here,
  74				Q_NEXT_TYPE(ehci, *hw_p));
  75		here = *prev_p;
  76	}
  77	/* an interrupt entry (at list end) could have been shared */
  78	if (!here.ptr)
  79		return;
  80
  81	/* update shadow and hardware lists ... the old "next" pointers
  82	 * from ptr may still be in use, the caller updates them.
  83	 */
  84	*prev_p = *periodic_next_shadow(ehci, &here,
  85			Q_NEXT_TYPE(ehci, *hw_p));
  86
  87	if (!ehci->use_dummy_qh ||
  88	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
  89			!= EHCI_LIST_END(ehci))
  90		*hw_p = *shadow_next_periodic(ehci, &here,
  91				Q_NEXT_TYPE(ehci, *hw_p));
  92	else
  93		*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
  94}
  95
  96/*-------------------------------------------------------------------------*/
  97
  98/* Bandwidth and TT management */
  99
 100/* Find the TT data structure for this device; create it if necessary */
 101static struct ehci_tt *find_tt(struct usb_device *udev)
 102{
 103	struct usb_tt		*utt = udev->tt;
 104	struct ehci_tt		*tt, **tt_index, **ptt;
 105	unsigned		port;
 106	bool			allocated_index = false;
 107
 108	if (!utt)
 109		return NULL;		/* Not below a TT */
 110
 111	/*
 112	 * Find/create our data structure.
 113	 * For hubs with a single TT, we get it directly.
 114	 * For hubs with multiple TTs, there's an extra level of pointers.
 115	 */
 116	tt_index = NULL;
 117	if (utt->multi) {
 118		tt_index = utt->hcpriv;
 119		if (!tt_index) {		/* Create the index array */
 120			tt_index = kcalloc(utt->hub->maxchild,
 121					   sizeof(*tt_index),
 122					   GFP_ATOMIC);
 123			if (!tt_index)
 124				return ERR_PTR(-ENOMEM);
 125			utt->hcpriv = tt_index;
 126			allocated_index = true;
 127		}
 128		port = udev->ttport - 1;
 129		ptt = &tt_index[port];
 130	} else {
 131		port = 0;
 132		ptt = (struct ehci_tt **) &utt->hcpriv;
 133	}
 134
 135	tt = *ptt;
 136	if (!tt) {				/* Create the ehci_tt */
 137		struct ehci_hcd		*ehci =
 138				hcd_to_ehci(bus_to_hcd(udev->bus));
 139
 140		tt = kzalloc(sizeof(*tt), GFP_ATOMIC);
 141		if (!tt) {
 142			if (allocated_index) {
 143				utt->hcpriv = NULL;
 144				kfree(tt_index);
 145			}
 146			return ERR_PTR(-ENOMEM);
 147		}
 148		list_add_tail(&tt->tt_list, &ehci->tt_list);
 149		INIT_LIST_HEAD(&tt->ps_list);
 150		tt->usb_tt = utt;
 151		tt->tt_port = port;
 152		*ptt = tt;
 153	}
 154
 155	return tt;
 156}
 157
 158/* Release the TT above udev, if it's not in use */
 159static void drop_tt(struct usb_device *udev)
 160{
 161	struct usb_tt		*utt = udev->tt;
 162	struct ehci_tt		*tt, **tt_index, **ptt;
 163	int			cnt, i;
 164
 165	if (!utt || !utt->hcpriv)
 166		return;		/* Not below a TT, or never allocated */
 167
 168	cnt = 0;
 169	if (utt->multi) {
 170		tt_index = utt->hcpriv;
 171		ptt = &tt_index[udev->ttport - 1];
 172
 173		/* How many entries are left in tt_index? */
 174		for (i = 0; i < utt->hub->maxchild; ++i)
 175			cnt += !!tt_index[i];
 176	} else {
 177		tt_index = NULL;
 178		ptt = (struct ehci_tt **) &utt->hcpriv;
 179	}
 180
 181	tt = *ptt;
 182	if (!tt || !list_empty(&tt->ps_list))
 183		return;		/* never allocated, or still in use */
 184
 185	list_del(&tt->tt_list);
 186	*ptt = NULL;
 187	kfree(tt);
 188	if (cnt == 1) {
 189		utt->hcpriv = NULL;
 190		kfree(tt_index);
 191	}
 192}
 193
 194static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
 195		struct ehci_per_sched *ps)
 196{
 197	dev_dbg(&ps->udev->dev,
 198			"ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n",
 199			ps->ep->desc.bEndpointAddress,
 200			(sign >= 0 ? "reserve" : "release"), type,
 201			(ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod,
 202			ps->phase, ps->phase_uf, ps->period,
 203			ps->usecs, ps->c_usecs, ps->cs_mask);
 204}
 205
 206static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
 207		struct ehci_qh *qh, int sign)
 208{
 209	unsigned		start_uf;
 210	unsigned		i, j, m;
 211	int			usecs = qh->ps.usecs;
 212	int			c_usecs = qh->ps.c_usecs;
 213	int			tt_usecs = qh->ps.tt_usecs;
 214	struct ehci_tt		*tt;
 215
 216	if (qh->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
 217		return;
 218	start_uf = qh->ps.bw_phase << 3;
 219
 220	bandwidth_dbg(ehci, sign, "intr", &qh->ps);
 221
 222	if (sign < 0) {		/* Release bandwidth */
 223		usecs = -usecs;
 224		c_usecs = -c_usecs;
 225		tt_usecs = -tt_usecs;
 226	}
 227
 228	/* Entire transaction (high speed) or start-split (full/low speed) */
 229	for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
 230			i += qh->ps.bw_uperiod)
 231		ehci->bandwidth[i] += usecs;
 232
 233	/* Complete-split (full/low speed) */
 234	if (qh->ps.c_usecs) {
 235		/* NOTE: adjustments needed for FSTN */
 236		for (i = start_uf; i < EHCI_BANDWIDTH_SIZE;
 237				i += qh->ps.bw_uperiod) {
 238			for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) {
 239				if (qh->ps.cs_mask & m)
 240					ehci->bandwidth[i+j] += c_usecs;
 241			}
 242		}
 243	}
 244
 245	/* FS/LS bus bandwidth */
 246	if (tt_usecs) {
 247		tt = find_tt(qh->ps.udev);
 248		if (sign > 0)
 249			list_add_tail(&qh->ps.ps_list, &tt->ps_list);
 250		else
 251			list_del(&qh->ps.ps_list);
 252
 253		for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES;
 254				i += qh->ps.bw_period)
 255			tt->bandwidth[i] += tt_usecs;
 256	}
 257}
 258
 259/*-------------------------------------------------------------------------*/
 260
 261static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
 262		struct ehci_tt *tt)
 263{
 264	struct ehci_per_sched	*ps;
 265	unsigned		uframe, uf, x;
 266	u8			*budget_line;
 267
 268	if (!tt)
 269		return;
 270	memset(budget_table, 0, EHCI_BANDWIDTH_SIZE);
 271
 272	/* Add up the contributions from all the endpoints using this TT */
 273	list_for_each_entry(ps, &tt->ps_list, ps_list) {
 274		for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE;
 275				uframe += ps->bw_uperiod) {
 276			budget_line = &budget_table[uframe];
 277			x = ps->tt_usecs;
 278
 279			/* propagate the time forward */
 280			for (uf = ps->phase_uf; uf < 8; ++uf) {
 281				x += budget_line[uf];
 282
 283				/* Each microframe lasts 125 us */
 284				if (x <= 125) {
 285					budget_line[uf] = x;
 286					break;
 287				}
 288				budget_line[uf] = 125;
 289				x -= 125;
 290			}
 291		}
 292	}
 293}
 294
 295static int __maybe_unused same_tt(struct usb_device *dev1,
 296		struct usb_device *dev2)
 297{
 298	if (!dev1->tt || !dev2->tt)
 299		return 0;
 300	if (dev1->tt != dev2->tt)
 301		return 0;
 302	if (dev1->tt->multi)
 303		return dev1->ttport == dev2->ttport;
 304	else
 305		return 1;
 306}
 307
 308#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
 309
 310/* Which uframe does the low/fullspeed transfer start in?
 311 *
 312 * The parameter is the mask of ssplits in "H-frame" terms
 313 * and this returns the transfer start uframe in "B-frame" terms,
 314 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 315 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 316 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 317 */
 318static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
 319{
 320	unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;
 321
 322	if (!smask) {
 323		ehci_err(ehci, "invalid empty smask!\n");
 324		/* uframe 7 can't have bw so this will indicate failure */
 325		return 7;
 326	}
 327	return ffs(smask) - 1;
 328}
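/*
 * Editorial sketch, not part of the driver: ffs(smask) - 1 picks the
 * lowest set bit, so an S-mask of 0x0c (ssplits in uframes 2 and 3)
 * yields start uframe 2 in "B-frame" terms, per the lag described above.
 */
#if 0	/* standalone illustration; compile separately */
#include <strings.h>	/* ffs() */

static int start_uframe(unsigned smask)
{
	return smask ? ffs(smask) - 1 : 7;	/* 7 = can't-schedule sentinel */
}
#endif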
 329
 330static const unsigned char
 331max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
 332
 333/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
 334static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
 335{
 336	int i;
 337
 338	for (i = 0; i < 7; i++) {
 339		if (max_tt_usecs[i] < tt_usecs[i]) {
 340			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
 341			tt_usecs[i] = max_tt_usecs[i];
 342		}
 343	}
 344}
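/*
 * Editorial sketch, not part of the driver: a worked example of the
 * carryover above.  A 300 us full-speed transfer budgeted at uframe 4
 * spills forward under the max_tt_usecs limits; anything left in
 * uframe 7 (whose limit is 0) makes tt_available() below fail.
 */
#if 0	/* standalone illustration; compile separately */
#include <stdio.h>

static const unsigned char max_us[8] = { 125, 125, 125, 125, 125, 125, 30, 0 };

int main(void)
{
	unsigned short us[8] = { 0, 0, 0, 0, 300, 0, 0, 0 };
	int i;

	for (i = 0; i < 7; i++) {
		if (us[i] > max_us[i]) {
			us[i + 1] += us[i] - max_us[i];
			us[i] = max_us[i];
		}
	}
	for (i = 0; i < 8; i++)
		printf("%u ", us[i]);
	printf("\n");	/* 0 0 0 0 125 125 30 20 -> uframe 7 over budget */
	return 0;
}
#endif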
 345
 346/*
 347 * Return true if the device's tt's downstream bus is available for a
 348 * periodic transfer of the specified length (usecs), starting at the
 349 * specified frame/uframe.  Note that (as summarized in section 11.19
 350 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 351 * uframe.
 352 *
 353 * The uframe parameter is when the fullspeed/lowspeed transfer
 354 * should be executed in "B-frame" terms, which is the same as the
 355 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 356 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 357 * See the EHCI spec sec 4.5 and fig 4.7.
 358 *
 359 * This checks if the full/lowspeed bus, at the specified starting uframe,
 360 * has the specified bandwidth available, according to rules listed
 361 * in USB 2.0 spec section 11.18.1 fig 11-60.
 362 *
 363 * This does not check if the transfer would exceed the max ssplit
 364 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 365 * since proper scheduling limits ssplits to less than 16 per uframe.
 366 */
 367static int tt_available(
 368	struct ehci_hcd		*ehci,
 369	struct ehci_per_sched	*ps,
 370	struct ehci_tt		*tt,
 371	unsigned		frame,
 372	unsigned		uframe
 373)
 374{
 375	unsigned		period = ps->bw_period;
 376	unsigned		usecs = ps->tt_usecs;
 377
 378	if ((period == 0) || (uframe >= 7))	/* error */
 379		return 0;
 380
 381	for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES;
 382			frame += period) {
 383		unsigned	i, uf;
 384		unsigned short	tt_usecs[8];
 385
 386		if (tt->bandwidth[frame] + usecs > 900)
 387			return 0;
 388
 389		uf = frame << 3;
 390		for (i = 0; i < 8; (++i, ++uf))
 391			tt_usecs[i] = ehci->tt_budget[uf];
 392
 393		if (max_tt_usecs[uframe] <= tt_usecs[uframe])
 394			return 0;
 395
 396		/* special case for isoc transfers larger than 125us:
 397		 * the first and each subsequent fully used uframe
 398		 * must be empty, so as to not illegally delay
 399		 * already scheduled transactions
 400		 */
 401		if (usecs > 125) {
 402			int ufs = (usecs / 125);
 403
 404			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
 405				if (tt_usecs[i] > 0)
 406					return 0;
 407		}
 408
 409		tt_usecs[uframe] += usecs;
 410
 411		carryover_tt_bandwidth(tt_usecs);
 412
 413		/* fail if the carryover pushed bw past the last uframe's limit */
 414		if (max_tt_usecs[7] < tt_usecs[7])
 415			return 0;
 416	}
 417
 418	return 1;
 419}
 420
 421#else
 422
 423/* return true iff the device's transaction translator is available
 424 * for a periodic transfer starting at the specified frame, using
 425 * all the uframes in the mask.
 426 */
 427static int tt_no_collision(
 428	struct ehci_hcd		*ehci,
 429	unsigned		period,
 430	struct usb_device	*dev,
 431	unsigned		frame,
 432	u32			uf_mask
 433)
 434{
 435	if (period == 0)	/* error */
 436		return 0;
 437
 438	/* note bandwidth wastage:  split never follows csplit
 439	 * (different dev or endpoint) until the next uframe.
 440	 * calling convention doesn't make that distinction.
 441	 */
 442	for (; frame < ehci->periodic_size; frame += period) {
 443		union ehci_shadow	here;
 444		__hc32			type;
 445		struct ehci_qh_hw	*hw;
 446
 447		here = ehci->pshadow[frame];
 448		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
 449		while (here.ptr) {
 450			switch (hc32_to_cpu(ehci, type)) {
 451			case Q_TYPE_ITD:
 452				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
 453				here = here.itd->itd_next;
 454				continue;
 455			case Q_TYPE_QH:
 456				hw = here.qh->hw;
 457				if (same_tt(dev, here.qh->ps.udev)) {
 458					u32		mask;
 459
 460					mask = hc32_to_cpu(ehci,
 461							hw->hw_info2);
 462					/* "knows" no gap is needed */
 463					mask |= mask >> 8;
 464					if (mask & uf_mask)
 465						break;
 466				}
 467				type = Q_NEXT_TYPE(ehci, hw->hw_next);
 468				here = here.qh->qh_next;
 469				continue;
 470			case Q_TYPE_SITD:
 471				if (same_tt(dev, here.sitd->urb->dev)) {
 472					u16		mask;
 473
 474					mask = hc32_to_cpu(ehci, here.sitd
 475								->hw_uframe);
 476					/* FIXME assumes no gap for IN! */
 477					mask |= mask >> 8;
 478					if (mask & uf_mask)
 479						break;
 480				}
 481				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
 482				here = here.sitd->sitd_next;
 483				continue;
 484			/* case Q_TYPE_FSTN: */
 485			default:
 486				ehci_dbg(ehci,
 487					"periodic frame %d bogus type %d\n",
 488					frame, type);
 489			}
 490
 491			/* collision or error */
 492			return 0;
 493		}
 494	}
 495
 496	/* no collision */
 497	return 1;
 498}
 499
 500#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
 501
 502/*-------------------------------------------------------------------------*/
 503
 504static void enable_periodic(struct ehci_hcd *ehci)
 505{
 506	if (ehci->periodic_count++)
 507		return;
 508
 509	/* Stop waiting to turn off the periodic schedule */
 510	ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
 511
 512	/* Don't start the schedule until PSS is 0 */
 513	ehci_poll_PSS(ehci);
 514	turn_on_io_watchdog(ehci);
 515}
 516
 517static void disable_periodic(struct ehci_hcd *ehci)
 518{
 519	if (--ehci->periodic_count)
 520		return;
 521
 522	/* Don't turn off the schedule until PSS is 1 */
 523	ehci_poll_PSS(ehci);
 524}
 525
 526/*-------------------------------------------------------------------------*/
 527
 528/* periodic schedule slots have iso tds (normal or split) first, then a
 529 * sparse tree for active interrupt transfers.
 530 *
 531 * this just links in a qh; caller guarantees uframe masks are set right.
 532 * no FSTN support (yet; ehci 0.96+)
 533 */
 534static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
 535{
 536	unsigned	i;
 537	unsigned	period = qh->ps.period;
 538
 539	dev_dbg(&qh->ps.udev->dev,
 540		"link qh%d-%04x/%p start %d [%d/%d us]\n",
 541		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
 542			& (QH_CMASK | QH_SMASK),
 543		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
 544
 545	/* high bandwidth, or otherwise every microframe */
 546	if (period == 0)
 547		period = 1;
 548
 549	for (i = qh->ps.phase; i < ehci->periodic_size; i += period) {
 550		union ehci_shadow	*prev = &ehci->pshadow[i];
 551		__hc32			*hw_p = &ehci->periodic[i];
 552		union ehci_shadow	here = *prev;
 553		__hc32			type = 0;
 554
 555		/* skip the iso nodes at list head */
 556		while (here.ptr) {
 557			type = Q_NEXT_TYPE(ehci, *hw_p);
 558			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
 559				break;
 560			prev = periodic_next_shadow(ehci, prev, type);
 561			hw_p = shadow_next_periodic(ehci, &here, type);
 562			here = *prev;
 563		}
 564
 565		/* sorting each branch by period (slow-->fast)
 566		 * enables sharing interior tree nodes
 567		 */
 568		while (here.ptr && qh != here.qh) {
 569			if (qh->ps.period > here.qh->ps.period)
 570				break;
 571			prev = &here.qh->qh_next;
 572			hw_p = &here.qh->hw->hw_next;
 573			here = *prev;
 574		}
 575		/* link in this qh, unless some earlier pass did that */
 576		if (qh != here.qh) {
 577			qh->qh_next = here;
 578			if (here.qh)
 579				qh->hw->hw_next = *hw_p;
 580			wmb();
 581			prev->qh = qh;
 582			*hw_p = QH_NEXT(ehci, qh->qh_dma);
 583		}
 584	}
 585	qh->qh_state = QH_STATE_LINKED;
 586	qh->xacterrs = 0;
 587	qh->unlink_reason = 0;
 588
 589	/* update per-qh bandwidth for debugfs */
 590	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
 591		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
 592		: (qh->ps.usecs * 8);
 593
 594	list_add(&qh->intr_node, &ehci->intr_qh_list);
 595
 596	/* maybe enable periodic schedule processing */
 597	++ehci->intr_count;
 598	enable_periodic(ehci);
 599}
 600
 601static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
 602{
 603	unsigned	i;
 604	unsigned	period;
 605
 606	/*
 607	 * If qh is for a low/full-speed device, simply unlinking it
 608	 * could interfere with an ongoing split transaction.  To unlink
 609	 * it safely would require setting the QH_INACTIVATE bit and
 610	 * waiting at least one frame, as described in EHCI 4.12.2.5.
 611	 *
 612	 * We won't bother with any of this.  Instead, we assume that the
 613	 * only reason for unlinking an interrupt QH while the current URB
 614	 * is still active is to dequeue all the URBs (flush the whole
 615	 * endpoint queue).
 616	 *
 617	 * If rebalancing the periodic schedule is ever implemented, this
 618	 * approach will no longer be valid.
 619	 */
 620
 621	/* high bandwidth, or otherwise part of every microframe */
 622	period = qh->ps.period ? : 1;
 623
 624	for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
 625		periodic_unlink(ehci, i, qh);
 626
 627	/* update per-qh bandwidth for debugfs */
 628	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
 629		? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period)
 630		: (qh->ps.usecs * 8);
 631
 632	dev_dbg(&qh->ps.udev->dev,
 633		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
 634		qh->ps.period,
 635		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
 636		qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs);
 637
 638	/* qh->qh_next still "live" to HC */
 639	qh->qh_state = QH_STATE_UNLINK;
 640	qh->qh_next.ptr = NULL;
 641
 642	if (ehci->qh_scan_next == qh)
 643		ehci->qh_scan_next = list_entry(qh->intr_node.next,
 644				struct ehci_qh, intr_node);
 645	list_del(&qh->intr_node);
 646}
 647
 648static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
 649{
 650	if (qh->qh_state != QH_STATE_LINKED ||
 651			list_empty(&qh->unlink_node))
 652		return;
 653
 654	list_del_init(&qh->unlink_node);
 655
 656	/*
 657	 * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for
 658	 * avoiding unnecessary CPU wakeup
 659	 */
 660}
 661
 662static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
 663{
 664	/* If the QH isn't linked then there's nothing we can do. */
 665	if (qh->qh_state != QH_STATE_LINKED)
 666		return;
 667
 668	/* if the qh is waiting for unlink, cancel it now */
 669	cancel_unlink_wait_intr(ehci, qh);
 670
 671	qh_unlink_periodic(ehci, qh);
 672
 673	/* Make sure the unlinks are visible before starting the timer */
 674	wmb();
 675
 676	/*
 677	 * The EHCI spec doesn't say how long it takes the controller to
 678	 * stop accessing an unlinked interrupt QH.  The timer delay is
 679	 * 9 uframes; presumably that will be long enough.
 680	 */
 681	qh->unlink_cycle = ehci->intr_unlink_cycle;
 682
 683	/* New entries go at the end of the intr_unlink list */
 684	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
 685
 686	if (ehci->intr_unlinking)
 687		;	/* Avoid recursive calls */
 688	else if (ehci->rh_state < EHCI_RH_RUNNING)
 689		ehci_handle_intr_unlinks(ehci);
 690	else if (ehci->intr_unlink.next == &qh->unlink_node) {
 691		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
 692		++ehci->intr_unlink_cycle;
 693	}
 694}
 695
 696/*
 697 * Commonly, only one interrupt URB is scheduled on a qh, and
 698 * complete() runs in tasklet context, so introduce a short
 699 * delay to avoid unlinking the qh too early.
 700 */
 701static void start_unlink_intr_wait(struct ehci_hcd *ehci,
 702				   struct ehci_qh *qh)
 703{
 704	qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
 705
 706	/* New entries go at the end of the intr_unlink_wait list */
 707	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
 708
 709	if (ehci->rh_state < EHCI_RH_RUNNING)
 710		ehci_handle_start_intr_unlinks(ehci);
 711	else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
 712		ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
 713		++ehci->intr_unlink_wait_cycle;
 714	}
 715}
 716
 717static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
 718{
 719	struct ehci_qh_hw	*hw = qh->hw;
 720	int			rc;
 721
 722	qh->qh_state = QH_STATE_IDLE;
 723	hw->hw_next = EHCI_LIST_END(ehci);
 724
 725	if (!list_empty(&qh->qtd_list))
 726		qh_completions(ehci, qh);
 727
 728	/* reschedule QH iff another request is queued */
 729	if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) {
 730		rc = qh_schedule(ehci, qh);
 731		if (rc == 0) {
 732			qh_refresh(ehci, qh);
 733			qh_link_periodic(ehci, qh);
 734		}
 735
 736		/* An error here likely indicates handshake failure
 737		 * or no space left in the schedule.  Neither fault
 738		 * should happen often ...
 739		 *
 740		 * FIXME kill the now-dysfunctional queued urbs
 741		 */
 742		else {
 743			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
 744					qh, rc);
 745		}
 746	}
 747
 748	/* maybe turn off periodic schedule */
 749	--ehci->intr_count;
 750	disable_periodic(ehci);
 751}
 752
 753/*-------------------------------------------------------------------------*/
 754
 755static int check_period(
 756	struct ehci_hcd *ehci,
 757	unsigned	frame,
 758	unsigned	uframe,
 759	unsigned	uperiod,
 760	unsigned	usecs
 761) {
 762	/* complete split running into next frame?
 763	 * given FSTN support, we could sometimes check...
 764	 */
 765	if (uframe >= 8)
 766		return 0;
 767
 768	/* convert "usecs we need" to "max already claimed" */
 769	usecs = ehci->uframe_periodic_max - usecs;
 770
 771	for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE;
 772			uframe += uperiod) {
 773		if (ehci->bandwidth[uframe] > usecs)
 774			return 0;
 775	}
 776
 777	/* success! */
 778	return 1;
 779}
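/*
 * Editorial sketch, not part of the driver: check_period() above hoists
 * the arithmetic out of the loop.  Rather than testing
 * "claimed + usecs > limit" per slot, it precomputes the remaining
 * budget once and compares each slot against that.
 */
#if 0	/* standalone illustration; compile separately */
static int fits(const unsigned char *claimed, unsigned size,
		unsigned step, unsigned limit, unsigned usecs)
{
	unsigned budget = limit - usecs;	/* assumes usecs <= limit */
	unsigned i;

	for (i = 0; i < size; i += step)
		if (claimed[i] > budget)
			return 0;	/* this slot would overflow */
	return 1;
}
#endif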
 780
 781static int check_intr_schedule(
 782	struct ehci_hcd		*ehci,
 783	unsigned		frame,
 784	unsigned		uframe,
 785	struct ehci_qh		*qh,
 786	unsigned		*c_maskp,
 787	struct ehci_tt		*tt
 788)
 789{
 790	int		retval = -ENOSPC;
 791	u8		mask = 0;
 792
 793	if (qh->ps.c_usecs && uframe >= 6)	/* FSTN territory? */
 794		goto done;
 795
 796	if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs))
 797		goto done;
 798	if (!qh->ps.c_usecs) {
 799		retval = 0;
 800		*c_maskp = 0;
 801		goto done;
 802	}
 803
 804#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
 805	if (tt_available(ehci, &qh->ps, tt, frame, uframe)) {
 806		unsigned i;
 807
 808		/* TODO: this may need FSTN for SSPLIT in uframe 5. */
 809		for (i = uframe+2; i < 8 && i <= uframe+4; i++)
 810			if (!check_period(ehci, frame, i,
 811					qh->ps.bw_uperiod, qh->ps.c_usecs))
 812				goto done;
 813			else
 814				mask |= 1 << i;
 815
 816		retval = 0;
 817
 818		*c_maskp = mask;
 819	}
 820#else
 821	/* Make sure this tt's buffer is also available for CSPLITs.
 822	 * We pessimize a bit; probably the typical full speed case
 823	 * doesn't need the second CSPLIT.
 824	 *
 825	 * NOTE:  both SPLIT and CSPLIT could be checked in just
 826	 * one smart pass...
 827	 */
 828	mask = 0x03 << (uframe + qh->gap_uf);
 829	*c_maskp = mask;
 830
 831	mask |= 1 << uframe;
 832	if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) {
 833		if (!check_period(ehci, frame, uframe + qh->gap_uf + 1,
 834				qh->ps.bw_uperiod, qh->ps.c_usecs))
 835			goto done;
 836		if (!check_period(ehci, frame, uframe + qh->gap_uf,
 837				qh->ps.bw_uperiod, qh->ps.c_usecs))
 838			goto done;
 839		retval = 0;
 840	}
 841#endif
 842done:
 843	return retval;
 844}
 845
 846/* "first fit" scheduling policy used the first time through,
 847 * or when the previous schedule slot can't be re-used.
 848 */
 849static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
 850{
 851	int		status = 0;
 852	unsigned	uframe;
 853	unsigned	c_mask;
 854	struct ehci_qh_hw	*hw = qh->hw;
 855	struct ehci_tt		*tt;
 856
 857	hw->hw_next = EHCI_LIST_END(ehci);
 858
 859	/* reuse the previous schedule slots, if we can */
 860	if (qh->ps.phase != NO_FRAME) {
 861		ehci_dbg(ehci, "reused qh %p schedule\n", qh);
 862		return 0;
 863	}
 864
 865	uframe = 0;
 866	c_mask = 0;
 867	tt = find_tt(qh->ps.udev);
 868	if (IS_ERR(tt)) {
 869		status = PTR_ERR(tt);
 870		goto done;
 871	}
 872	compute_tt_budget(ehci->tt_budget, tt);
 873
 874	/* else scan the schedule to find a group of slots such that all
 875	 * uframes have enough periodic bandwidth available.
 876	 */
 877	/* "normal" case, uframing flexible except with splits */
 878	if (qh->ps.bw_period) {
 879		int		i;
 880		unsigned	frame;
 881
 882		for (i = qh->ps.bw_period; i > 0; --i) {
 883			frame = ++ehci->random_frame & (qh->ps.bw_period - 1);
 884			for (uframe = 0; uframe < 8; uframe++) {
 885				status = check_intr_schedule(ehci,
 886						frame, uframe, qh, &c_mask, tt);
 887				if (status == 0)
 888					goto got_it;
 889			}
 890		}
 891
 892	/* qh->ps.bw_period == 0 means every uframe */
 893	} else {
 894		status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt);
 895	}
 896	if (status)
 897		goto done;
 898
 899 got_it:
 900	qh->ps.phase = (qh->ps.period ? ehci->random_frame &
 901			(qh->ps.period - 1) : 0);
 902	qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1);
 903	qh->ps.phase_uf = uframe;
 904	qh->ps.cs_mask = qh->ps.period ?
 905			(c_mask << 8) | (1 << uframe) :
 906			QH_SMASK;
 907
 908	/* reset S-frame and (maybe) C-frame masks */
 909	hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
 910	hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask);
 911	reserve_release_intr_bandwidth(ehci, qh, 1);
 912
 913done:
 914	return status;
 915}
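/*
 * Editorial sketch, not part of the driver: cs_mask as built above keeps
 * the S-mask (start-split uframes) in the low byte and the C-mask
 * (complete-split uframes) in the high byte, matching hw_info2's layout.
 */
#if 0	/* standalone illustration; compile separately */
#include <stdint.h>

static uint16_t make_cs_mask(unsigned s_uframe, uint8_t c_mask)
{
	return (uint16_t)(c_mask << 8) | (1u << s_uframe);
}
/*
 * make_cs_mask(1, 0x38) == 0x3802: SSPLIT in uframe 1, CSPLITs in
 * uframes 3-5, as check_intr_schedule() would pick for uframe 1.
 */
#endif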
 916
 917static int intr_submit(
 918	struct ehci_hcd		*ehci,
 919	struct urb		*urb,
 920	struct list_head	*qtd_list,
 921	gfp_t			mem_flags
 922) {
 923	unsigned		epnum;
 924	unsigned long		flags;
 925	struct ehci_qh		*qh;
 926	int			status;
 927	struct list_head	empty;
 928
 929	/* get endpoint and transfer/schedule data */
 930	epnum = urb->ep->desc.bEndpointAddress;
 931
 932	spin_lock_irqsave(&ehci->lock, flags);
 933
 934	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
 935		status = -ESHUTDOWN;
 936		goto done_not_linked;
 937	}
 938	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
 939	if (unlikely(status))
 940		goto done_not_linked;
 941
 942	/* get qh and force any scheduling errors */
 943	INIT_LIST_HEAD(&empty);
 944	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
 945	if (qh == NULL) {
 946		status = -ENOMEM;
 947		goto done;
 948	}
 949	if (qh->qh_state == QH_STATE_IDLE) {
 950		status = qh_schedule(ehci, qh);
 951		if (status)
 952			goto done;
 953	}
 954
 955	/* then queue the urb's tds to the qh */
 956	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
 957	BUG_ON(qh == NULL);
 958
 959	/* stuff into the periodic schedule */
 960	if (qh->qh_state == QH_STATE_IDLE) {
 961		qh_refresh(ehci, qh);
 962		qh_link_periodic(ehci, qh);
 963	} else {
 964		/* cancel unlink wait for the qh */
 965		cancel_unlink_wait_intr(ehci, qh);
 966	}
 967
 968	/* ... update usbfs periodic stats */
 969	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
 970
 971done:
 972	if (unlikely(status))
 973		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
 974done_not_linked:
 975	spin_unlock_irqrestore(&ehci->lock, flags);
 976	if (status)
 977		qtd_list_free(ehci, urb, qtd_list);
 978
 979	return status;
 980}
 981
 982static void scan_intr(struct ehci_hcd *ehci)
 983{
 984	struct ehci_qh		*qh;
 985
 986	list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list,
 987			intr_node) {
 988
 989		/* clean any finished work for this qh */
 990		if (!list_empty(&qh->qtd_list)) {
 991			int temp;
 992
 993			/*
 994			 * Unlinks could happen here; completion reporting
 995			 * drops the lock.  That's why ehci->qh_scan_next
 996			 * always holds the next qh to scan; if the next qh
 997			 * gets unlinked then ehci->qh_scan_next is adjusted
 998			 * in qh_unlink_periodic().
 999			 */
1000			temp = qh_completions(ehci, qh);
1001			if (unlikely(temp))
1002				start_unlink_intr(ehci, qh);
1003			else if (unlikely(list_empty(&qh->qtd_list) &&
1004					qh->qh_state == QH_STATE_LINKED))
1005				start_unlink_intr_wait(ehci, qh);
1006		}
1007	}
1008}
1009
1010/*-------------------------------------------------------------------------*/
1011
1012/* ehci_iso_stream ops work with both ITD and SITD */
1013
1014static struct ehci_iso_stream *
1015iso_stream_alloc(gfp_t mem_flags)
1016{
1017	struct ehci_iso_stream *stream;
1018
1019	stream = kzalloc(sizeof(*stream), mem_flags);
1020	if (likely(stream != NULL)) {
1021		INIT_LIST_HEAD(&stream->td_list);
1022		INIT_LIST_HEAD(&stream->free_list);
1023		stream->next_uframe = NO_FRAME;
1024		stream->ps.phase = NO_FRAME;
1025	}
1026	return stream;
1027}
1028
1029static void
1030iso_stream_init(
1031	struct ehci_hcd		*ehci,
1032	struct ehci_iso_stream	*stream,
1033	struct urb		*urb
1034)
1035{
1036	static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
1037
1038	struct usb_device	*dev = urb->dev;
1039	u32			buf1;
1040	unsigned		epnum, maxp;
1041	int			is_input;
1042	unsigned		tmp;
1043
1044	/*
1045	 * this might be a "high bandwidth" highspeed endpoint,
1046	 * as encoded in the ep descriptor's wMaxPacketSize field
1047	 */
1048	epnum = usb_pipeendpoint(urb->pipe);
1049	is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
1050	maxp = usb_endpoint_maxp(&urb->ep->desc);
1051	buf1 = is_input ? 1 << 11 : 0;
1052
1053	/* knows about ITD vs SITD */
1054	if (dev->speed == USB_SPEED_HIGH) {
1055		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
1056
1057		stream->highspeed = 1;
1058
1059		buf1 |= maxp;
1060		maxp *= multi;
1061
1062		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
1063		stream->buf1 = cpu_to_hc32(ehci, buf1);
1064		stream->buf2 = cpu_to_hc32(ehci, multi);
1065
1066		/* usbfs wants to report the average usecs per frame tied up
1067		 * when transfers on this endpoint are scheduled ...
1068		 */
1069		stream->ps.usecs = HS_USECS_ISO(maxp);
1070
1071		/* period for bandwidth allocation */
1072		tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
1073				1 << (urb->ep->desc.bInterval - 1));
1074
1075		/* Allow urb->interval to override */
1076		stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
1077
1078		stream->uperiod = urb->interval;
1079		stream->ps.period = urb->interval >> 3;
1080		stream->bandwidth = stream->ps.usecs * 8 /
1081				stream->ps.bw_uperiod;
1082
1083	} else {
1084		u32		addr;
1085		int		think_time;
1086		int		hs_transfers;
1087
1088		addr = dev->ttport << 24;
1089		if (!ehci_is_TDI(ehci)
1090				|| (dev->tt->hub !=
1091					ehci_to_hcd(ehci)->self.root_hub))
1092			addr |= dev->tt->hub->devnum << 16;
1093		addr |= epnum << 8;
1094		addr |= dev->devnum;
1095		stream->ps.usecs = HS_USECS_ISO(maxp);
1096		think_time = dev->tt->think_time;
1097		stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
1098				dev->speed, is_input, 1, maxp));
1099		hs_transfers = max(1u, (maxp + 187) / 188);
1100		if (is_input) {
1101			u32	tmp;
1102
1103			addr |= 1 << 31;
1104			stream->ps.c_usecs = stream->ps.usecs;
1105			stream->ps.usecs = HS_USECS_ISO(1);
1106			stream->ps.cs_mask = 1;
1107
1108			/* c-mask as specified in USB 2.0 11.18.4 3.c */
1109			tmp = (1 << (hs_transfers + 2)) - 1;
1110			stream->ps.cs_mask |= tmp << (8 + 2);
1111		} else
1112			stream->ps.cs_mask = smask_out[hs_transfers - 1];
1113
1114		/* period for bandwidth allocation */
1115		tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
1116				1 << (urb->ep->desc.bInterval - 1));
1117
1118		/* Allow urb->interval to override */
1119		stream->ps.bw_period = min_t(unsigned, tmp, urb->interval);
1120		stream->ps.bw_uperiod = stream->ps.bw_period << 3;
1121
1122		stream->ps.period = urb->interval;
1123		stream->uperiod = urb->interval << 3;
1124		stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) /
1125				stream->ps.bw_period;
1126
1127		/* stream->splits gets created from cs_mask later */
1128		stream->address = cpu_to_hc32(ehci, addr);
1129	}
1130
1131	stream->ps.udev = dev;
1132	stream->ps.ep = urb->ep;
1133
1134	stream->bEndpointAddress = is_input | epnum;
1135	stream->maxp = maxp;
1136}
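/*
 * Editorial sketch, not part of the driver: the high-speed branch above
 * multiplies maxp by the high-bandwidth multiplier.  Both come from
 * wMaxPacketSize (USB 2.0 sec 9.6.6): bits 10:0 are the base packet
 * size, bits 12:11 the number of *additional* transactions per uframe,
 * which is what usb_endpoint_maxp() and usb_endpoint_maxp_mult() report.
 */
#if 0	/* standalone illustration; compile separately */
#include <stdint.h>

static unsigned hb_bytes_per_uframe(uint16_t wMaxPacketSize)
{
	unsigned maxp = wMaxPacketSize & 0x7ff;
	unsigned mult = ((wMaxPacketSize >> 11) & 0x3) + 1;

	return maxp * mult;	/* 0x1400 -> 1024 * 3 = 3072 bytes/uframe */
}
#endif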
1137
1138static struct ehci_iso_stream *
1139iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
1140{
1141	unsigned		epnum;
1142	struct ehci_iso_stream	*stream;
1143	struct usb_host_endpoint *ep;
1144	unsigned long		flags;
1145
1146	epnum = usb_pipeendpoint(urb->pipe);
1147	if (usb_pipein(urb->pipe))
1148		ep = urb->dev->ep_in[epnum];
1149	else
1150		ep = urb->dev->ep_out[epnum];
1151
1152	spin_lock_irqsave(&ehci->lock, flags);
1153	stream = ep->hcpriv;
1154
1155	if (unlikely(stream == NULL)) {
1156		stream = iso_stream_alloc(GFP_ATOMIC);
1157		if (likely(stream != NULL)) {
1158			ep->hcpriv = stream;
1159			iso_stream_init(ehci, stream, urb);
1160		}
1161
1162	/* if dev->ep[epnum] is a QH, hw is set */
1163	} else if (unlikely(stream->hw != NULL)) {
1164		ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
1165			urb->dev->devpath, epnum,
1166			usb_pipein(urb->pipe) ? "in" : "out");
1167		stream = NULL;
1168	}
1169
1170	spin_unlock_irqrestore(&ehci->lock, flags);
1171	return stream;
1172}
1173
1174/*-------------------------------------------------------------------------*/
1175
1176/* ehci_iso_sched ops can be ITD-only or SITD-only */
1177
1178static struct ehci_iso_sched *
1179iso_sched_alloc(unsigned packets, gfp_t mem_flags)
1180{
1181	struct ehci_iso_sched	*iso_sched;
1182	int			size = sizeof(*iso_sched);
1183
1184	size += packets * sizeof(struct ehci_iso_packet);
1185	iso_sched = kzalloc(size, mem_flags);
1186	if (likely(iso_sched != NULL))
1187		INIT_LIST_HEAD(&iso_sched->td_list);
1188
1189	return iso_sched;
1190}
1191
1192static inline void
1193itd_sched_init(
1194	struct ehci_hcd		*ehci,
1195	struct ehci_iso_sched	*iso_sched,
1196	struct ehci_iso_stream	*stream,
1197	struct urb		*urb
1198)
1199{
1200	unsigned	i;
1201	dma_addr_t	dma = urb->transfer_dma;
1202
1203	/* how many uframes are needed for these transfers */
1204	iso_sched->span = urb->number_of_packets * stream->uperiod;
1205
1206	/* figure out per-uframe itd fields that we'll need later
1207	 * when we fit new itds into the schedule.
1208	 */
1209	for (i = 0; i < urb->number_of_packets; i++) {
1210		struct ehci_iso_packet	*uframe = &iso_sched->packet[i];
1211		unsigned		length;
1212		dma_addr_t		buf;
1213		u32			trans;
1214
1215		length = urb->iso_frame_desc[i].length;
1216		buf = dma + urb->iso_frame_desc[i].offset;
1217
1218		trans = EHCI_ISOC_ACTIVE;
1219		trans |= buf & 0x0fff;
1220		if (unlikely(((i + 1) == urb->number_of_packets))
1221				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
1222			trans |= EHCI_ITD_IOC;
1223		trans |= length << 16;
1224		uframe->transaction = cpu_to_hc32(ehci, trans);
1225
1226		/* might need to cross a buffer page within a uframe */
1227		uframe->bufp = (buf & ~(u64)0x0fff);
1228		buf += length;
1229		if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
1230			uframe->cross = 1;
1231	}
1232}
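/*
 * Editorial sketch, not part of the driver: the transaction dword built
 * above follows the iTD layout in EHCI spec section 3.3 -- status in the
 * top bits, transfer length at bits 27:16, IOC at bit 15, and the
 * offset within the (later-patched) buffer page at bits 11:0.  The
 * ACTIVE constant here is a stand-in for the driver's definition.
 */
#if 0	/* standalone illustration; compile separately */
#include <stdint.h>

#define SK_ISOC_ACTIVE	(1u << 31)

static uint32_t pack_itd_transaction(uint32_t buf, uint32_t length, int ioc)
{
	uint32_t trans = SK_ISOC_ACTIVE;

	trans |= buf & 0x0fff;		/* offset into buffer page */
	trans |= length << 16;		/* bytes to transfer */
	if (ioc)
		trans |= 1u << 15;	/* interrupt on completion */
	return trans;
}
#endif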
1233
1234static void
1235iso_sched_free(
1236	struct ehci_iso_stream	*stream,
1237	struct ehci_iso_sched	*iso_sched
1238)
1239{
1240	if (!iso_sched)
1241		return;
1242	/* caller must hold ehci->lock! */
1243	list_splice(&iso_sched->td_list, &stream->free_list);
1244	kfree(iso_sched);
1245}
1246
1247static int
1248itd_urb_transaction(
1249	struct ehci_iso_stream	*stream,
1250	struct ehci_hcd		*ehci,
1251	struct urb		*urb,
1252	gfp_t			mem_flags
1253)
1254{
1255	struct ehci_itd		*itd;
1256	dma_addr_t		itd_dma;
1257	int			i;
1258	unsigned		num_itds;
1259	struct ehci_iso_sched	*sched;
1260	unsigned long		flags;
1261
1262	sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
1263	if (unlikely(sched == NULL))
1264		return -ENOMEM;
1265
1266	itd_sched_init(ehci, sched, stream, urb);
1267
1268	if (urb->interval < 8)
1269		num_itds = 1 + (sched->span + 7) / 8;
1270	else
1271		num_itds = urb->number_of_packets;
1272
1273	/* allocate/init ITDs */
1274	spin_lock_irqsave(&ehci->lock, flags);
1275	for (i = 0; i < num_itds; i++) {
1276
1277		/*
1278		 * Use iTDs from the free list, but not iTDs that may
1279		 * still be in use by the hardware.
1280		 */
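		/*
		 * An iTD freed during the current frame (itd->frame ==
		 * ehci->now_frame) may still be cached by the controller,
		 * so allocate a fresh one instead of recycling it.
		 */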
1281		if (likely(!list_empty(&stream->free_list))) {
1282			itd = list_first_entry(&stream->free_list,
1283					struct ehci_itd, itd_list);
1284			if (itd->frame == ehci->now_frame)
1285				goto alloc_itd;
1286			list_del(&itd->itd_list);
1287			itd_dma = itd->itd_dma;
1288		} else {
1289 alloc_itd:
1290			spin_unlock_irqrestore(&ehci->lock, flags);
1291			itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
1292					&itd_dma);
1293			spin_lock_irqsave(&ehci->lock, flags);
1294			if (!itd) {
1295				iso_sched_free(stream, sched);
1296				spin_unlock_irqrestore(&ehci->lock, flags);
1297				return -ENOMEM;
1298			}
1299		}
1300
1301		memset(itd, 0, sizeof(*itd));
1302		itd->itd_dma = itd_dma;
1303		itd->frame = NO_FRAME;
1304		list_add(&itd->itd_list, &sched->td_list);
1305	}
1306	spin_unlock_irqrestore(&ehci->lock, flags);
1307
1308	/* temporarily store schedule info in hcpriv */
1309	urb->hcpriv = sched;
1310	urb->error_count = 0;
1311	return 0;
1312}
1313
1314/*-------------------------------------------------------------------------*/
1315
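/*
 * Reserve (sign > 0) or release (sign < 0) this stream's bandwidth in the
 * schedule-wide bookkeeping.  High-speed streams charge only the per-uframe
 * budget table; full-speed streams also charge their transaction
 * translator's per-frame budget and join or leave its ps_list.
 */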
1316static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
1317		struct ehci_iso_stream *stream, int sign)
1318{
1319	unsigned		uframe;
1320	unsigned		i, j;
1321	unsigned		s_mask, c_mask, m;
1322	int			usecs = stream->ps.usecs;
1323	int			c_usecs = stream->ps.c_usecs;
1324	int			tt_usecs = stream->ps.tt_usecs;
1325	struct ehci_tt		*tt;
1326
1327	if (stream->ps.phase == NO_FRAME)	/* Bandwidth wasn't reserved */
1328		return;
1329	uframe = stream->ps.bw_phase << 3;
1330
1331	bandwidth_dbg(ehci, sign, "iso", &stream->ps);
1332
1333	if (sign < 0) {		/* Release bandwidth */
1334		usecs = -usecs;
1335		c_usecs = -c_usecs;
1336		tt_usecs = -tt_usecs;
1337	}
1338
1339	if (!stream->splits) {		/* High speed */
1340		for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE;
1341				i += stream->ps.bw_uperiod)
1342			ehci->bandwidth[i] += usecs;
1343
1344	} else {			/* Full speed */
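		/*
		 * cs_mask holds the start-split uframe mask in its low
		 * byte and the complete-split mask in the byte above it.
		 */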
1345		s_mask = stream->ps.cs_mask;
1346		c_mask = s_mask >> 8;
1347
1348		/* NOTE: adjustment needed for frame overflow */
1349		for (i = uframe; i < EHCI_BANDWIDTH_SIZE;
1350				i += stream->ps.bw_uperiod) {
1351			for ((j = stream->ps.phase_uf, m = 1 << j); j < 8;
1352					(++j, m <<= 1)) {
1353				if (s_mask & m)
1354					ehci->bandwidth[i+j] += usecs;
1355				else if (c_mask & m)
1356					ehci->bandwidth[i+j] += c_usecs;
1357			}
1358		}
1359
1360		tt = find_tt(stream->ps.udev);
1361		if (sign > 0)
1362			list_add_tail(&stream->ps.ps_list, &tt->ps_list);
1363		else
1364			list_del(&stream->ps.ps_list);
1365
1366		for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES;
1367				i += stream->ps.bw_period)
1368			tt->bandwidth[i] += tt_usecs;
1369	}
1370}
1371
1372static inline int
1373itd_slot_ok(
1374	struct ehci_hcd		*ehci,
1375	struct ehci_iso_stream	*stream,
1376	unsigned		uframe
1377)
1378{
1379	unsigned		usecs;
1380
1381	/* convert "usecs we need" to "max already claimed" */
1382	usecs = ehci->uframe_periodic_max - stream->ps.usecs;
1383
1384	for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE;
1385			uframe += stream->ps.bw_uperiod) {
1386		if (ehci->bandwidth[uframe] > usecs)
1387			return 0;
1388	}
1389	return 1;
1390}
1391
1392static inline int
1393sitd_slot_ok(
1394	struct ehci_hcd		*ehci,
1395	struct ehci_iso_stream	*stream,
1396	unsigned		uframe,
1397	struct ehci_iso_sched	*sched,
1398	struct ehci_tt		*tt
1399)
1400{
1401	unsigned		mask, tmp;
1402	unsigned		frame, uf;
1403
1404	mask = stream->ps.cs_mask << (uframe & 7);
1405
1406	/* for OUT, don't wrap SSPLIT into H-microframe 7 */
1407	if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7))
1408		return 0;
1409
1410	/* for IN, don't wrap CSPLIT into the next frame */
1411	if (mask & ~0xffff)
1412		return 0;
1413
1414	/* check bandwidth */
1415	uframe &= stream->ps.bw_uperiod - 1;
1416	frame = uframe >> 3;
1417
1418#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
1419	/* The tt's fullspeed bus bandwidth must be available.
1420	 * tt_available scheduling guarantees 10+% for control/bulk.
1421	 */
1422	uf = uframe & 7;
1423	if (!tt_available(ehci, &stream->ps, tt, frame, uf))
1424		return 0;
1425#else
1426	/* tt must be idle for start(s), any gap, and csplit.
1427	 * assume scheduling slop leaves 10+% for control/bulk.
1428	 */
1429	if (!tt_no_collision(ehci, stream->ps.bw_period,
1430			stream->ps.udev, frame, mask))
1431		return 0;
1432#endif
1433
1434	do {
1435		unsigned	max_used;
1436		unsigned	i;
1437
1438		/* check starts (OUT uses more than one) */
1439		uf = uframe;
1440		max_used = ehci->uframe_periodic_max - stream->ps.usecs;
1441		for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) {
1442			if (ehci->bandwidth[uf] > max_used)
1443				return 0;
1444		}
1445
1446		/* for IN, check CSPLIT */
1447		if (stream->ps.c_usecs) {
1448			max_used = ehci->uframe_periodic_max -
1449					stream->ps.c_usecs;
1450			uf = uframe & ~7;
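			/*
			 * Complete-splits can begin two uframes after the
			 * start-split, and their mask sits in the high byte
			 * of cs_mask -- hence the scan starts at bit 2 + 8.
			 */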
1451			tmp = 1 << (2+8);
1452			for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) {
1453				if ((stream->ps.cs_mask & tmp) == 0)
1454					continue;
1455				if (ehci->bandwidth[uf+i] > max_used)
1456					return 0;
1457			}
1458		}
1459
1460		uframe += stream->ps.bw_uperiod;
1461	} while (uframe < EHCI_BANDWIDTH_SIZE);
1462
1463	stream->ps.cs_mask <<= uframe & 7;
1464	stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask);
1465	return 1;
1466}
1467
1468/*
1469 * This scheduler plans almost as far into the future as it has actual
1470 * periodic schedule slots.  (Affected by EHCI_TUNE_FLS, which defaults to
1471 * "as small as possible" to be cache-friendlier.)  That limits the size of
1472 * transfers you can stream reliably; avoid more than 64 msec per urb.
1473 * Also avoid queue depths shorter than ehci's worst-case irq latency (affected
1474 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
1475 * and other factors); or more than about 230 msec total (for portability,
1476 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
1477 */
1478
1479static int
1480iso_stream_schedule(
1481	struct ehci_hcd		*ehci,
1482	struct urb		*urb,
1483	struct ehci_iso_stream	*stream
1484)
1485{
1486	u32			now, base, next, start, period, span, now2;
1487	u32			wrap = 0, skip = 0;
1488	int			status = 0;
1489	unsigned		mod = ehci->periodic_size << 3;
1490	struct ehci_iso_sched	*sched = urb->hcpriv;
1491	bool			empty = list_empty(&stream->td_list);
1492	bool			new_stream = false;
1493
1494	period = stream->uperiod;
1495	span = sched->span;
1496	if (!stream->highspeed)
1497		span <<= 3;
1498
1499	/* Start a new isochronous stream? */
1500	if (unlikely(empty && !hcd_periodic_completion_in_progress(
1501			ehci_to_hcd(ehci), urb->ep))) {
1502
1503		/* Schedule the endpoint */
1504		if (stream->ps.phase == NO_FRAME) {
1505			int		done = 0;
1506			struct ehci_tt	*tt = find_tt(stream->ps.udev);
1507
1508			if (IS_ERR(tt)) {
1509				status = PTR_ERR(tt);
1510				goto fail;
1511			}
1512			compute_tt_budget(ehci->tt_budget, tt);
1513
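			/*
			 * Pick a (frame-aligned) pseudo-random phase within
			 * the period as the search's starting point, so that
			 * different streams get spread across the schedule
			 * instead of piling up at frame 0.
			 */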
1514			start = ((-(++ehci->random_frame)) << 3) & (period - 1);
1515
1516			/* find a uframe slot with enough bandwidth.
1517			 * Early uframes are more precious because full-speed
1518			 * iso IN transfers can't use late uframes,
1519			 * and therefore they should be allocated last.
1520			 */
1521			next = start;
1522			start += period;
1523			do {
1524				start--;
1525				/* check schedule: enough space? */
1526				if (stream->highspeed) {
1527					if (itd_slot_ok(ehci, stream, start))
1528						done = 1;
1529				} else {
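					/*
					 * Full-speed transactions can't start
					 * in uframe 6 or 7: the start- and
					 * complete-splits must fit within one
					 * frame, so skip those candidates.
					 */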
1530					if ((start % 8) >= 6)
1531						continue;
1532					if (sitd_slot_ok(ehci, stream, start,
1533							sched, tt))
1534						done = 1;
1535				}
1536			} while (start > next && !done);
1537
1538			/* no room in the schedule */
1539			if (!done) {
1540				ehci_dbg(ehci, "iso sched full %p\n", urb);
1541				status = -ENOSPC;
1542				goto fail;
1543			}
1544			stream->ps.phase = (start >> 3) &
1545					(stream->ps.period - 1);
1546			stream->ps.bw_phase = stream->ps.phase &
1547					(stream->ps.bw_period - 1);
1548			stream->ps.phase_uf = start & 7;
1549			reserve_release_iso_bandwidth(ehci, stream, 1);
1550		}
1551
1552		/* New stream is already scheduled; use the upcoming slot */
1553		else {
1554			start = (stream->ps.phase << 3) + stream->ps.phase_uf;
1555		}
1556
1557		stream->next_uframe = start;
1558		new_stream = true;
1559	}
1560
1561	now = ehci_read_frame_index(ehci) & (mod - 1);
1562
1563	/* Take the isochronous scheduling threshold (the HC's lookahead) into account */
1564	if (ehci->i_thresh)
1565		next = now + ehci->i_thresh;	/* uframe cache */
1566	else
1567		next = (now + 2 + 7) & ~0x07;	/* full frame cache */
1568
1569	/* If needed, initialize last_iso_frame so that this URB will be seen */
1570	if (ehci->isoc_count == 0)
1571		ehci->last_iso_frame = now >> 3;
1572
1573	/*
1574	 * Use ehci->last_iso_frame as the base.  There can't be any
1575	 * TDs scheduled for earlier than that.
1576	 */
1577	base = ehci->last_iso_frame << 3;
1578	next = (next - base) & (mod - 1);
1579	start = (stream->next_uframe - base) & (mod - 1);
1580
1581	if (unlikely(new_stream))
1582		goto do_ASAP;
1583
1584	/*
1585	 * Typical case: reuse current schedule, stream may still be active.
1586	 * Hopefully there are no gaps from the host falling behind
1587	 * (irq delays etc).  If there are, the behavior depends on
1588	 * whether URB_ISO_ASAP is set.
1589	 */
1590	now2 = (now - base) & (mod - 1);
1591
1592	/* Is the schedule about to wrap around? */
1593	if (unlikely(!empty && start < period)) {
1594		ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
1595				urb, stream->next_uframe, base, period, mod);
1596		status = -EFBIG;
1597		goto fail;
1598	}
1599
1600	/* Is the next packet scheduled after the base time? */
1601	if (likely(!empty || start <= now2 + period)) {
1602
1603		/* URB_ISO_ASAP: make sure that start >= next */
1604		if (unlikely(start < next &&
1605				(urb->transfer_flags & URB_ISO_ASAP)))
1606			goto do_ASAP;
1607
1608		/* Otherwise use start, if it's not in the past */
1609		if (likely(start >= now2))
1610			goto use_start;
1611
1612	/* Otherwise we got an underrun while the queue was empty */
1613	} else {
1614		if (urb->transfer_flags & URB_ISO_ASAP)
1615			goto do_ASAP;
1616		wrap = mod;
1617		now2 += mod;
1618	}
1619
1620	/* How many uframes and packets do we need to skip? */
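	/*
	 * Round the miss up to a whole number of periods.  period is a
	 * power of two, so "& -period" truncates to a multiple of it;
	 * e.g. now2 - start = 5 with period 4 gives skip = 8.
	 */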
1621	skip = (now2 - start + period - 1) & -period;
1622	if (skip >= span) {		/* Entirely in the past? */
1623		ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n",
1624				urb, start + base, span - period, now2 + base,
1625				base);
1626
1627		/* Try to keep the last TD intact for scanning later */
1628		skip = span - period;
1629
1630		/* Will it come before the current scan position? */
1631		if (empty) {
1632			skip = span;	/* Skip the entire URB */
1633			status = 1;	/* and give it back immediately */
1634			iso_sched_free(stream, sched);
1635			sched = NULL;
1636		}
1637	}
1638	urb->error_count = skip / period;
1639	if (sched)
1640		sched->first_packet = urb->error_count;
1641	goto use_start;
1642
1643 do_ASAP:
1644	/* Use the first slot at/after "next" that keeps the stream's phase */
1645	start = next + ((start - next) & (period - 1));
1646
1647 use_start:
1648	/* Tried to schedule too far into the future? */
1649	if (unlikely(start + span - period >= mod + wrap)) {
1650		ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n",
1651				urb, start, span - period, mod + wrap);
1652		status = -EFBIG;
1653		goto fail;
1654	}
1655
1656	start += base;
1657	stream->next_uframe = (start + skip) & (mod - 1);
1658
1659	/* report high speed start in uframes; full speed, in frames */
1660	urb->start_frame = start & (mod - 1);
1661	if (!stream->highspeed)
1662		urb->start_frame >>= 3;
1663	return status;
1664
1665 fail:
1666	iso_sched_free(stream, sched);
1667	urb->hcpriv = NULL;
1668	return status;
1669}
1670
1671/*-------------------------------------------------------------------------*/
1672
1673static inline void
1674itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
1675		struct ehci_itd *itd)
1676{
1677	int i;
1678
1679	/* it's been recently zeroed */
1680	itd->hw_next = EHCI_LIST_END(ehci);
1681	itd->hw_bufp[0] = stream->buf0;
1682	itd->hw_bufp[1] = stream->buf1;
1683	itd->hw_bufp[2] = stream->buf2;
1684
1685	for (i = 0; i < 8; i++)
1686		itd->index[i] = -1;
1687
1688	/* All other fields are filled when scheduling */
1689}
1690
1691static inline void
1692itd_patch(
1693	struct ehci_hcd		*ehci,
1694	struct ehci_itd		*itd,
1695	struct ehci_iso_sched	*iso_sched,
1696	unsigned		index,
1697	u16			uframe
1698)
1699{
1700	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
1701	unsigned		pg = itd->pg;
1702
1703	/* BUG_ON(pg == 6 && uf->cross); */
1704
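	/*
	 * Record which iso packet this uframe slot carries, merge the
	 * page-select bits (PG, bits 14:12) into the transaction word,
	 * and point the selected buffer-page slot at the packet's page.
	 */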
1705	uframe &= 0x07;
1706	itd->index[uframe] = index;
1707
1708	itd->hw_transaction[uframe] = uf->transaction;
1709	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
1710	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
1711	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
1712
1713	/* iso_frame_desc[].offset must be strictly increasing */
1714	if (unlikely(uf->cross)) {
1715		u64	bufp = uf->bufp + 4096;
1716
1717		itd->pg = ++pg;
1718		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
1719		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
1720	}
1721}
1722
1723static inline void
1724itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1725{
1726	union ehci_shadow	*prev = &ehci->pshadow[frame];
1727	__hc32			*hw_p = &ehci->periodic[frame];
1728	union ehci_shadow	here = *prev;
1729	__hc32			type = 0;
1730
1731	/* skip iso nodes from earlier microframes; iso TDs stay ahead of QHs in each frame's list */
1732	while (here.ptr) {
1733		type = Q_NEXT_TYPE(ehci, *hw_p);
1734		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1735			break;
1736		prev = periodic_next_shadow(ehci, prev, type);
1737		hw_p = shadow_next_periodic(ehci, &here, type);
1738		here = *prev;
1739	}
1740
1741	itd->itd_next = here;
1742	itd->hw_next = *hw_p;
1743	prev->itd = itd;
1744	itd->frame = frame;
1745	wmb();
1746	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1747}
1748
1749/* fit urb's itds into the selected schedule slot; activate as needed */
1750static void itd_link_urb(
1751	struct ehci_hcd		*ehci,
1752	struct urb		*urb,
1753	unsigned		mod,
1754	struct ehci_iso_stream	*stream
1755)
1756{
1757	int			packet;
1758	unsigned		next_uframe, uframe, frame;
1759	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
1760	struct ehci_itd		*itd;
1761
1762	next_uframe = stream->next_uframe & (mod - 1);
1763
1764	if (unlikely(list_empty(&stream->td_list)))
1765		ehci_to_hcd(ehci)->self.bandwidth_allocated
1766				+= stream->bandwidth;
1767
1768	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1769		if (ehci->amd_pll_fix == 1)
1770			usb_amd_quirk_pll_disable();
1771	}
1772
1773	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1774
1775	/* fill iTDs uframe by uframe */
1776	for (packet = iso_sched->first_packet, itd = NULL;
1777			packet < urb->number_of_packets;) {
1778		if (itd == NULL) {
1779			/* ASSERT:  we have all necessary itds */
1780			/* BUG_ON(list_empty(&iso_sched->td_list)); */
1781
1782			/* ASSERT:  no itds for this endpoint in this uframe */
1783
1784			itd = list_entry(iso_sched->td_list.next,
1785					struct ehci_itd, itd_list);
1786			list_move_tail(&itd->itd_list, &stream->td_list);
1787			itd->stream = stream;
1788			itd->urb = urb;
1789			itd_init(ehci, stream, itd);
1790		}
1791
1792		uframe = next_uframe & 0x07;
1793		frame = next_uframe >> 3;
1794
1795		itd_patch(ehci, itd, iso_sched, packet, uframe);
1796
1797		next_uframe += stream->uperiod;
1798		next_uframe &= mod - 1;
1799		packet++;
1800
1801		/* link completed itds into the schedule */
1802		if (((next_uframe >> 3) != frame)
1803				|| packet == urb->number_of_packets) {
1804			itd_link(ehci, frame & (ehci->periodic_size - 1), itd);
1805			itd = NULL;
1806		}
1807	}
1808	stream->next_uframe = next_uframe;
1809
1810	/* don't need that schedule data any more */
1811	iso_sched_free(stream, iso_sched);
1812	urb->hcpriv = stream;
1813
1814	++ehci->isoc_count;
1815	enable_periodic(ehci);
1816}
1817
1818#define	ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1819
1820/* Process and recycle a completed ITD.  Return true iff its urb completed,
1821 * and hence its completion callback probably added things to the hardware
1822 * schedule.
1823 *
1824 * Note that we carefully avoid recycling this descriptor until after any
1825 * completion callback runs, so that it won't be reused quickly.  That is,
1826 * assuming (a) no more than two urbs per frame on this endpoint, and also
1827 * (b) only this endpoint's completions submit URBs.  It seems some silicon
1828 * corrupts things if you reuse completed descriptors very quickly...
1829 */
1830static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
1831{
1832	struct urb				*urb = itd->urb;
1833	struct usb_iso_packet_descriptor	*desc;
1834	u32					t;
1835	unsigned				uframe;
1836	int					urb_index = -1;
1837	struct ehci_iso_stream			*stream = itd->stream;
1838	bool					retval = false;
1839
1840	/* for each uframe with a packet */
1841	for (uframe = 0; uframe < 8; uframe++) {
1842		if (likely(itd->index[uframe] == -1))
1843			continue;
1844		urb_index = itd->index[uframe];
1845		desc = &urb->iso_frame_desc[urb_index];
1846
1847		t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
1848		itd->hw_transaction[uframe] = 0;
1849
1850		/* report transfer status */
1851		if (unlikely(t & ISO_ERRS)) {
1852			urb->error_count++;
1853			if (t & EHCI_ISOC_BUF_ERR)
1854				desc->status = usb_pipein(urb->pipe)
1855					? -ENOSR  /* hc couldn't read */
1856					: -ECOMM; /* hc couldn't write */
1857			else if (t & EHCI_ISOC_BABBLE)
1858				desc->status = -EOVERFLOW;
1859			else /* (t & EHCI_ISOC_XACTERR) */
1860				desc->status = -EPROTO;
1861
1862			/* HC need not update length with this error */
1863			if (!(t & EHCI_ISOC_BABBLE)) {
1864				desc->actual_length = EHCI_ITD_LENGTH(t);
1865				urb->actual_length += desc->actual_length;
1866			}
1867		} else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
1868			desc->status = 0;
1869			desc->actual_length = EHCI_ITD_LENGTH(t);
1870			urb->actual_length += desc->actual_length;
1871		} else {
1872			/* URB was too late */
1873			urb->error_count++;
1874		}
1875	}
1876
1877	/* handle completion now? */
1878	if (likely((urb_index + 1) != urb->number_of_packets))
1879		goto done;
1880
1881	/*
1882	 * ASSERT: it's really the last itd for this urb
1883	 * list_for_each_entry (itd, &stream->td_list, itd_list)
1884	 *	 BUG_ON(itd->urb == urb);
1885	 */
1886
1887	/* give urb back to the driver; completion often (re)submits */
1888	ehci_urb_done(ehci, urb, 0);
1889	retval = true;
1890	urb = NULL;
1891
1892	--ehci->isoc_count;
1893	disable_periodic(ehci);
1894
1895	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1896	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
1897		if (ehci->amd_pll_fix == 1)
1898			usb_amd_quirk_pll_enable();
1899	}
1900
1901	if (unlikely(list_is_singular(&stream->td_list)))
1902		ehci_to_hcd(ehci)->self.bandwidth_allocated
1903				-= stream->bandwidth;
1904
1905done:
1906	itd->urb = NULL;
1907
1908	/* Add to the end of the free list for later reuse */
1909	list_move_tail(&itd->itd_list, &stream->free_list);
1910
1911	/* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
1912	if (list_empty(&stream->td_list)) {
1913		list_splice_tail_init(&stream->free_list,
1914				&ehci->cached_itd_list);
1915		start_free_itds(ehci);
1916	}
1917
1918	return retval;
1919}
1920
1921/*-------------------------------------------------------------------------*/
1922
1923static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
1924	gfp_t mem_flags)
1925{
1926	int			status = -EINVAL;
1927	unsigned long		flags;
1928	struct ehci_iso_stream	*stream;
1929
1930	/* Get iso_stream head */
1931	stream = iso_stream_find(ehci, urb);
1932	if (unlikely(stream == NULL)) {
1933		ehci_dbg(ehci, "can't get iso stream\n");
1934		return -ENOMEM;
1935	}
1936	if (unlikely(urb->interval != stream->uperiod)) {
1937		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
1938			stream->uperiod, urb->interval);
1939		goto done;
1940	}
1941
1942#ifdef EHCI_URB_TRACE
1943	ehci_dbg(ehci,
1944		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1945		__func__, urb->dev->devpath, urb,
1946		usb_pipeendpoint(urb->pipe),
1947		usb_pipein(urb->pipe) ? "in" : "out",
1948		urb->transfer_buffer_length,
1949		urb->number_of_packets, urb->interval,
1950		stream);
1951#endif
1952
1953	/* allocate ITDs w/o locking anything */
1954	status = itd_urb_transaction(stream, ehci, urb, mem_flags);
1955	if (unlikely(status < 0)) {
1956		ehci_dbg(ehci, "can't init itds\n");
1957		goto done;
1958	}
1959
1960	/* schedule ... need to lock */
1961	spin_lock_irqsave(&ehci->lock, flags);
1962	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
1963		status = -ESHUTDOWN;
1964		goto done_not_linked;
1965	}
1966	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1967	if (unlikely(status))
1968		goto done_not_linked;
1969	status = iso_stream_schedule(ehci, urb, stream);
1970	if (likely(status == 0)) {
1971		itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
1972	} else if (status > 0) {
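		/*
		 * The whole URB missed its window: give it back right
		 * away, with every packet counted as an underrun.
		 */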
1973		status = 0;
1974		ehci_urb_done(ehci, urb, 0);
1975	} else {
1976		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1977	}
1978 done_not_linked:
1979	spin_unlock_irqrestore(&ehci->lock, flags);
1980 done:
1981	return status;
1982}
1983
1984/*-------------------------------------------------------------------------*/
1985
1986/*
1987 * "Split ISO TDs" ... used for USB 1.1 devices going through the
1988 * TTs in USB 2.0 hubs.  These need microframe scheduling.
1989 */
1990
1991static inline void
1992sitd_sched_init(
1993	struct ehci_hcd		*ehci,
1994	struct ehci_iso_sched	*iso_sched,
1995	struct ehci_iso_stream	*stream,
1996	struct urb		*urb
1997)
1998{
1999	unsigned	i;
2000	dma_addr_t	dma = urb->transfer_dma;
2001
2002	/* how many frames are needed for these transfers */
2003	iso_sched->span = urb->number_of_packets * stream->ps.period;
2004
2005	/* figure out per-frame sitd fields that we'll need later
2006	 * when we fit new sitds into the schedule.
2007	 */
2008	for (i = 0; i < urb->number_of_packets; i++) {
2009		struct ehci_iso_packet	*packet = &iso_sched->packet[i];
2010		unsigned		length;
2011		dma_addr_t		buf;
2012		u32			trans;
2013
2014		length = urb->iso_frame_desc[i].length & 0x03ff;
2015		buf = dma + urb->iso_frame_desc[i].offset;
2016
2017		trans = SITD_STS_ACTIVE;
2018		if (((i + 1) == urb->number_of_packets)
2019				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
2020			trans |= SITD_IOC;
2021		trans |= length << 16;
2022		packet->transaction = cpu_to_hc32(ehci, trans);
2023
2024		/* might need to cross a buffer page within a td */
2025		packet->bufp = buf;
2026		packet->buf1 = (buf + length) & ~0x0fff;
2027		if (packet->buf1 != (buf & ~(u64)0x0fff))
2028			packet->cross = 1;
2029
2030		/* OUT uses multiple start-splits */
2031		if (stream->bEndpointAddress & USB_DIR_IN)
2032			continue;
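		/*
		 * Each start-split carries at most 188 bytes (one uframe
		 * of full-speed bus time), so buf1's T-count is the length
		 * in 188-byte chunks; TP becomes BEGIN (rather than ALL)
		 * when more than one start-split is needed.
		 */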
2033		length = (length + 187) / 188;
2034		if (length > 1) /* BEGIN vs ALL */
2035			length |= 1 << 3;
2036		packet->buf1 |= length;
2037	}
2038}
2039
2040static int
2041sitd_urb_transaction(
2042	struct ehci_iso_stream	*stream,
2043	struct ehci_hcd		*ehci,
2044	struct urb		*urb,
2045	gfp_t			mem_flags
2046)
2047{
2048	struct ehci_sitd	*sitd;
2049	dma_addr_t		sitd_dma;
2050	int			i;
2051	struct ehci_iso_sched	*iso_sched;
2052	unsigned long		flags;
2053
2054	iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
2055	if (iso_sched == NULL)
2056		return -ENOMEM;
2057
2058	sitd_sched_init(ehci, iso_sched, stream, urb);
2059
2060	/* allocate/init sITDs */
2061	spin_lock_irqsave(&ehci->lock, flags);
2062	for (i = 0; i < urb->number_of_packets; i++) {
2063
2064		/* NOTE:  for now, we don't try to handle wraparound cases
2065		 * for IN (using sitd->hw_backpointer, like a FSTN), which
2066		 * means we never need two sitds for full speed packets.
2067		 */
2068
2069		/*
2070		 * Use siTDs from the free list, but not siTDs that may
2071		 * still be in use by the hardware.
2072		 */
2073		if (likely(!list_empty(&stream->free_list))) {
2074			sitd = list_first_entry(&stream->free_list,
2075					 struct ehci_sitd, sitd_list);
2076			if (sitd->frame == ehci->now_frame)
2077				goto alloc_sitd;
2078			list_del(&sitd->sitd_list);
2079			sitd_dma = sitd->sitd_dma;
2080		} else {
2081 alloc_sitd:
2082			spin_unlock_irqrestore(&ehci->lock, flags);
2083			sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
2084					&sitd_dma);
2085			spin_lock_irqsave(&ehci->lock, flags);
2086			if (!sitd) {
2087				iso_sched_free(stream, iso_sched);
2088				spin_unlock_irqrestore(&ehci->lock, flags);
2089				return -ENOMEM;
2090			}
2091		}
2092
2093		memset(sitd, 0, sizeof(*sitd));
2094		sitd->sitd_dma = sitd_dma;
2095		sitd->frame = NO_FRAME;
2096		list_add(&sitd->sitd_list, &iso_sched->td_list);
2097	}
2098
2099	/* temporarily store schedule info in hcpriv */
2100	urb->hcpriv = iso_sched;
2101	urb->error_count = 0;
2102
2103	spin_unlock_irqrestore(&ehci->lock, flags);
2104	return 0;
2105}
2106
2107/*-------------------------------------------------------------------------*/
2108
2109static inline void
2110sitd_patch(
2111	struct ehci_hcd		*ehci,
2112	struct ehci_iso_stream	*stream,
2113	struct ehci_sitd	*sitd,
2114	struct ehci_iso_sched	*iso_sched,
2115	unsigned		index
2116)
2117{
2118	struct ehci_iso_packet	*uf = &iso_sched->packet[index];
2119	u64			bufp;
2120
2121	sitd->hw_next = EHCI_LIST_END(ehci);
2122	sitd->hw_fullspeed_ep = stream->address;
2123	sitd->hw_uframe = stream->splits;
2124	sitd->hw_results = uf->transaction;
2125	sitd->hw_backpointer = EHCI_LIST_END(ehci);
2126
2127	bufp = uf->bufp;
2128	sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
2129	sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
2130
2131	sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
2132	if (uf->cross)
2133		bufp += 4096;
2134	sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
2135	sitd->index = index;
2136}
2137
2138static inline void
2139sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
2140{
2141	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
2142	sitd->sitd_next = ehci->pshadow[frame];
2143	sitd->hw_next = ehci->periodic[frame];
2144	ehci->pshadow[frame].sitd = sitd;
2145	sitd->frame = frame;
2146	wmb();
2147	ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
2148}
2149
2150/* fit urb's sitds into the selected schedule slot; activate as needed */
2151static void sitd_link_urb(
2152	struct ehci_hcd		*ehci,
2153	struct urb		*urb,
2154	unsigned		mod,
2155	struct ehci_iso_stream	*stream
2156)
2157{
2158	int			packet;
2159	unsigned		next_uframe;
2160	struct ehci_iso_sched	*sched = urb->hcpriv;
2161	struct ehci_sitd	*sitd;
2162
2163	next_uframe = stream->next_uframe;
2164
2165	if (list_empty(&stream->td_list))
2166		/* usbfs ignores TT bandwidth */
2167		ehci_to_hcd(ehci)->self.bandwidth_allocated
2168				+= stream->bandwidth;
2169
2170	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2171		if (ehci->amd_pll_fix == 1)
2172			usb_amd_quirk_pll_disable();
2173	}
2174
2175	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2176
2177	/* fill sITDs frame by frame */
2178	for (packet = sched->first_packet, sitd = NULL;
2179			packet < urb->number_of_packets;
2180			packet++) {
2181
2182		/* ASSERT:  we have all necessary sitds */
2183		BUG_ON(list_empty(&sched->td_list));
2184
2185		/* ASSERT:  no sitds for this endpoint in this frame */
2186
2187		sitd = list_entry(sched->td_list.next,
2188				struct ehci_sitd, sitd_list);
2189		list_move_tail(&sitd->sitd_list, &stream->td_list);
2190		sitd->stream = stream;
2191		sitd->urb = urb;
2192
2193		sitd_patch(ehci, stream, sitd, sched, packet);
2194		sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1),
2195				sitd);
2196
2197		next_uframe += stream->uperiod;
2198	}
2199	stream->next_uframe = next_uframe & (mod - 1);
2200
2201	/* don't need that schedule data any more */
2202	iso_sched_free(stream, sched);
2203	urb->hcpriv = stream;
2204
2205	++ehci->isoc_count;
2206	enable_periodic(ehci);
2207}
2208
2209/*-------------------------------------------------------------------------*/
2210
2211#define	SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2212				| SITD_STS_XACT | SITD_STS_MMF)
2213
2214/* Process and recycle a completed SITD.  Return true iff its urb completed,
2215 * and hence its completion callback probably added things to the hardware
2216 * schedule.
2217 *
2218 * Note that we carefully avoid recycling this descriptor until after any
2219 * completion callback runs, so that it won't be reused quickly.  That is,
2220 * assuming (a) no more than two urbs per frame on this endpoint, and also
2221 * (b) only this endpoint's completions submit URBs.  It seems some silicon
2222 * corrupts things if you reuse completed descriptors very quickly...
2223 */
2224static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
2225{
2226	struct urb				*urb = sitd->urb;
2227	struct usb_iso_packet_descriptor	*desc;
2228	u32					t;
2229	int					urb_index;
2230	struct ehci_iso_stream			*stream = sitd->stream;
2231	bool					retval = false;
2232
2233	urb_index = sitd->index;
2234	desc = &urb->iso_frame_desc[urb_index];
2235	t = hc32_to_cpup(ehci, &sitd->hw_results);
2236
2237	/* report transfer status */
2238	if (unlikely(t & SITD_ERRS)) {
2239		urb->error_count++;
2240		if (t & SITD_STS_DBE)
2241			desc->status = usb_pipein(urb->pipe)
2242				? -ENOSR  /* hc couldn't read */
2243				: -ECOMM; /* hc couldn't write */
2244		else if (t & SITD_STS_BABBLE)
2245			desc->status = -EOVERFLOW;
2246		else /* XACT, MMF, etc */
2247			desc->status = -EPROTO;
2248	} else if (unlikely(t & SITD_STS_ACTIVE)) {
2249		/* URB was too late */
2250		urb->error_count++;
2251	} else {
2252		desc->status = 0;
2253		desc->actual_length = desc->length - SITD_LENGTH(t);
2254		urb->actual_length += desc->actual_length;
2255	}
2256
2257	/* handle completion now? */
2258	if ((urb_index + 1) != urb->number_of_packets)
2259		goto done;
2260
2261	/*
2262	 * ASSERT: it's really the last sitd for this urb
2263	 * list_for_each_entry (sitd, &stream->td_list, sitd_list)
2264	 *	 BUG_ON(sitd->urb == urb);
2265	 */
2266
2267	/* give urb back to the driver; completion often (re)submits */
2268	ehci_urb_done(ehci, urb, 0);
2269	retval = true;
2270	urb = NULL;
2271
2272	--ehci->isoc_count;
2273	disable_periodic(ehci);
2274
2275	ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2276	if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
2277		if (ehci->amd_pll_fix == 1)
2278			usb_amd_quirk_pll_enable();
2279	}
2280
2281	if (list_is_singular(&stream->td_list))
2282		ehci_to_hcd(ehci)->self.bandwidth_allocated
2283				-= stream->bandwidth;
2284
2285done:
2286	sitd->urb = NULL;
2287
2288	/* Add to the end of the free list for later reuse */
2289	list_move_tail(&sitd->sitd_list, &stream->free_list);
2290
2291	/* Recycle the siTDs when the pipeline is empty (ep no longer in use) */
2292	if (list_empty(&stream->td_list)) {
2293		list_splice_tail_init(&stream->free_list,
2294				&ehci->cached_sitd_list);
2295		start_free_itds(ehci);
2296	}
2297
2298	return retval;
2299}
2300
2301
2302static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
2303	gfp_t mem_flags)
2304{
2305	int			status = -EINVAL;
2306	unsigned long		flags;
2307	struct ehci_iso_stream	*stream;
2308
2309	/* Get iso_stream head */
2310	stream = iso_stream_find(ehci, urb);
2311	if (stream == NULL) {
2312		ehci_dbg(ehci, "can't get iso stream\n");
2313		return -ENOMEM;
2314	}
2315	if (urb->interval != stream->ps.period) {
2316		ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
2317			stream->ps.period, urb->interval);
2318		goto done;
2319	}
2320
2321#ifdef EHCI_URB_TRACE
2322	ehci_dbg(ehci,
2323		"submit %p dev%s ep%d%s-iso len %d\n",
2324		urb, urb->dev->devpath,
2325		usb_pipeendpoint(urb->pipe),
2326		usb_pipein(urb->pipe) ? "in" : "out",
2327		urb->transfer_buffer_length);
2328#endif
2329
2330	/* allocate SITDs */
2331	status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
2332	if (status < 0) {
2333		ehci_dbg(ehci, "can't init sitds\n");
2334		goto done;
2335	}
2336
2337	/* schedule ... need to lock */
2338	spin_lock_irqsave(&ehci->lock, flags);
2339	if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
2340		status = -ESHUTDOWN;
2341		goto done_not_linked;
2342	}
2343	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2344	if (unlikely(status))
2345		goto done_not_linked;
2346	status = iso_stream_schedule(ehci, urb, stream);
2347	if (likely(status == 0)) {
2348		sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
2349	} else if (status > 0) {
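		/* the whole URB missed its window: give it back now */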
2350		status = 0;
2351		ehci_urb_done(ehci, urb, 0);
2352	} else {
2353		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2354	}
2355 done_not_linked:
2356	spin_unlock_irqrestore(&ehci->lock, flags);
2357 done:
2358	return status;
2359}
2360
2361/*-------------------------------------------------------------------------*/
2362
2363static void scan_isoc(struct ehci_hcd *ehci)
2364{
2365	unsigned		uf, now_frame, frame;
2366	unsigned		fmask = ehci->periodic_size - 1;
2367	bool			modified, live;
2368	union ehci_shadow	q, *q_p;
2369	__hc32			type, *hw_p;
2370
2371	/*
2372	 * When running, scan from last scan point up to "now"
2373	 * else clean up by scanning everything that's left.
2374	 * Touches as few pages as possible:  cache-friendly.
2375	 */
2376	if (ehci->rh_state >= EHCI_RH_RUNNING) {
2377		uf = ehci_read_frame_index(ehci);
2378		now_frame = (uf >> 3) & fmask;
2379		live = true;
2380	} else {
2381		now_frame = (ehci->last_iso_frame - 1) & fmask;
2382		live = false;
2383	}
2384	ehci->now_frame = now_frame;
2385
2386	frame = ehci->last_iso_frame;
2387
2388restart:
2389	/* Scan each element in frame's queue for completions */
2390	q_p = &ehci->pshadow[frame];
2391	hw_p = &ehci->periodic[frame];
2392	q.ptr = q_p->ptr;
2393	type = Q_NEXT_TYPE(ehci, *hw_p);
2394	modified = false;
2395
2396	while (q.ptr != NULL) {
2397		switch (hc32_to_cpu(ehci, type)) {
2398		case Q_TYPE_ITD:
2399			/*
2400			 * If this ITD is still active, leave it for
2401			 * later processing ... check the next entry.
2402			 * No need to check for activity unless the
2403			 * frame is current.
2404			 */
2405			if (frame == now_frame && live) {
2406				rmb();
2407				for (uf = 0; uf < 8; uf++) {
2408					if (q.itd->hw_transaction[uf] &
2409							ITD_ACTIVE(ehci))
2410						break;
2411				}
2412				if (uf < 8) {
2413					q_p = &q.itd->itd_next;
2414					hw_p = &q.itd->hw_next;
2415					type = Q_NEXT_TYPE(ehci,
2416							q.itd->hw_next);
2417					q = *q_p;
2418					break;
2419				}
2420			}
2421
2422			/*
2423			 * Take finished ITDs out of the schedule
2424			 * and process them:  recycle, maybe report
2425			 * URB completion.  HC won't cache the
2426			 * pointer for much longer, if at all.
2427			 */
2428			*q_p = q.itd->itd_next;
2429			if (!ehci->use_dummy_qh ||
2430					q.itd->hw_next != EHCI_LIST_END(ehci))
2431				*hw_p = q.itd->hw_next;
2432			else
2433				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2434			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2435			wmb();
2436			modified = itd_complete(ehci, q.itd);
2437			q = *q_p;
2438			break;
2439		case Q_TYPE_SITD:
2440			/*
2441			 * If this SITD is still active, leave it for
2442			 * later processing ... check the next entry.
2443			 * Activity is possible only in the current frame
2444			 * or the one before it: splits can finish a frame late.
2445			 */
2446			if (((frame == now_frame) ||
2447					(((frame + 1) & fmask) == now_frame))
2448				&& live
2449				&& (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
2450
2451				q_p = &q.sitd->sitd_next;
2452				hw_p = &q.sitd->hw_next;
2453				type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2454				q = *q_p;
2455				break;
2456			}
2457
2458			/*
2459			 * Take finished SITDs out of the schedule
2460			 * and process them:  recycle, maybe report
2461			 * URB completion.
2462			 */
2463			*q_p = q.sitd->sitd_next;
2464			if (!ehci->use_dummy_qh ||
2465					q.sitd->hw_next != EHCI_LIST_END(ehci))
2466				*hw_p = q.sitd->hw_next;
2467			else
2468				*hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
2469			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2470			wmb();
2471			modified = sitd_complete(ehci, q.sitd);
2472			q = *q_p;
2473			break;
2474		default:
2475			ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
2476					type, frame, q.ptr);
2477			/* BUG(); */
2478			fallthrough;
2479		case Q_TYPE_QH:
2480		case Q_TYPE_FSTN:
2481			/* End of the iTDs and siTDs */
2482			q.ptr = NULL;
2483			break;
2484		}
2485
2486		/* Assume completion callbacks modify the queue */
2487		if (unlikely(modified && ehci->isoc_count > 0))
2488			goto restart;
2489	}
2490
2491	/* Stop when we have reached the current frame */
2492	if (frame == now_frame)
2493		return;
2494
2495	/* The last frame may still have active siTDs */
2496	ehci->last_iso_frame = frame;
2497	frame = (frame + 1) & fmask;
2498
2499	goto restart;
2500}