Linux Audio

Check our new training course

Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ISP1362 HCD (Host Controller Driver) for USB.
   4 *
   5 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
   6 *
   7 * Derived from the SL811 HCD, rewritten for ISP116x.
   8 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
   9 *
  10 * Portions:
  11 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
  12 * Copyright (C) 2004 David Brownell
  13 */
  14
  15/*
  16 * The ISP1362 chip requires a large delay (300ns and 462ns) between
  17 * accesses to the address and data register.
  18 * The following timing options exist:
  19 *
  20 * 1. Configure your memory controller to add such delays if it can (the best)
  21 * 2. Implement platform-specific delay function possibly
  22 *    combined with configuring the memory controller; see
  23 *    include/linux/usb_isp1362.h for more info.
  24 * 3. Use ndelay (easiest, poorest).
  25 *
  26 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
  27 * platform specific section of isp1362.h to select the appropriate variant.
  28 *
  29 * Also note that according to the Philips "ISP1362 Errata" document
  30 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
  31 * is reasserted (even with #CS deasserted) within 132ns after a
  32 * write cycle to any controller register. If the hardware doesn't
  33 * implement the recommended fix (gating the #WR with #CS) software
  34 * must ensure that no further write cycle (not necessarily to the chip!)
  35 * is issued by the CPU within this interval.
   36 *
  37 * For PXA25x this can be ensured by using VLIO with the maximum
  38 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
  39 */
  40
  41#undef ISP1362_DEBUG
  42
  43/*
  44 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
  45 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
  46 * requests are carried out in separate frames. This will delay any SETUP
  47 * packets until the start of the next frame so that this situation is
  48 * unlikely to occur (and makes usbtest happy running with a PXA255 target
  49 * device).
  50 */
  51#undef BUGGY_PXA2XX_UDC_USBTEST
  52
  53#undef PTD_TRACE
  54#undef URB_TRACE
  55#undef VERBOSE
  56#undef REGISTERS
  57
  58/* This enables a memory test on the ISP1362 chip memory to make sure the
  59 * chip access timing is correct.
  60 */
  61#undef CHIP_BUFFER_TEST
  62
  63#include <linux/module.h>
  64#include <linux/moduleparam.h>
  65#include <linux/kernel.h>
  66#include <linux/delay.h>
  67#include <linux/ioport.h>
  68#include <linux/sched.h>
  69#include <linux/slab.h>
  70#include <linux/errno.h>
  71#include <linux/list.h>
  72#include <linux/interrupt.h>
  73#include <linux/usb.h>
  74#include <linux/usb/isp1362.h>
  75#include <linux/usb/hcd.h>
  76#include <linux/platform_device.h>
  77#include <linux/pm.h>
  78#include <linux/io.h>
  79#include <linux/bitmap.h>
  80#include <linux/prefetch.h>
  81#include <linux/debugfs.h>
  82#include <linux/seq_file.h>
  83
  84#include <asm/irq.h>
  85#include <asm/byteorder.h>
  86#include <asm/unaligned.h>
  87
  88static int dbg_level;
  89#ifdef ISP1362_DEBUG
  90module_param(dbg_level, int, 0644);
  91#else
  92module_param(dbg_level, int, 0);
  93#endif
  94
  95#include "../core/usb.h"
  96#include "isp1362.h"
  97
  98
  99#define DRIVER_VERSION	"2005-04-04"
 100#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"
 101
 102MODULE_DESCRIPTION(DRIVER_DESC);
 103MODULE_LICENSE("GPL");
 104
 105static const char hcd_name[] = "isp1362-hcd";
 106
 107static void isp1362_hc_stop(struct usb_hcd *hcd);
 108static int isp1362_hc_start(struct usb_hcd *hcd);
 109
 110/*-------------------------------------------------------------------------*/
 111
 112/*
 113 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
 114 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 115 * completion.
 116 * We don't need a 'disable' counterpart, since interrupts will be disabled
 117 * only by the interrupt handler.
 118 */
 119static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
 120{
 121	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
 122		return;
 123	if (mask & ~isp1362_hcd->irqenb)
 124		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
 125	isp1362_hcd->irqenb |= mask;
 126	if (isp1362_hcd->irq_active)
 127		return;
 128	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
 129}
 130
 131/*-------------------------------------------------------------------------*/
 132
 133static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
 134						     u16 offset)
 135{
 136	struct isp1362_ep_queue *epq = NULL;
 137
 138	if (offset < isp1362_hcd->istl_queue[1].buf_start)
 139		epq = &isp1362_hcd->istl_queue[0];
 140	else if (offset < isp1362_hcd->intl_queue.buf_start)
 141		epq = &isp1362_hcd->istl_queue[1];
 142	else if (offset < isp1362_hcd->atl_queue.buf_start)
 143		epq = &isp1362_hcd->intl_queue;
 144	else if (offset < isp1362_hcd->atl_queue.buf_start +
 145		   isp1362_hcd->atl_queue.buf_size)
 146		epq = &isp1362_hcd->atl_queue;
 147
 148	if (epq)
 149		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
 150	else
 151		pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
 152
 153	return epq;
 154}
 155
 156static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 157{
 158	int offset;
 159
 160	if (index * epq->blk_size > epq->buf_size) {
 161		pr_warn("%s: Bad %s index %d(%d)\n",
 162			__func__, epq->name, index,
 163			epq->buf_size / epq->blk_size);
 164		return -EINVAL;
 165	}
 166	offset = epq->buf_start + index * epq->blk_size;
 167	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
 168
 169	return offset;
 170}
 171
 172/*-------------------------------------------------------------------------*/
 173
 174static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
 175				    int mps)
 176{
 177	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
 178
 179	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
 180	if (xfer_size < size && xfer_size % mps)
 181		xfer_size -= xfer_size % mps;
 182
 183	return xfer_size;
 184}
 185
/*
 * Reserve a contiguous run of buffer blocks in @epq large enough to hold
 * one PTD header plus @len bytes of payload for endpoint @ep.
 *
 * Returns the index of the first claimed block on success, -ENOMEM when
 * the queue has no free blocks at all, or -EOVERFLOW when no contiguous
 * run of the required length exists.  On success ep->ptd_offset,
 * ep->ptd_index and ep->num_ptds are updated and the blocks are marked
 * busy in epq->buf_map.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* blocks needed for PTD header + payload, rounded up */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* an endpoint must never hold two claims at the same time */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	/* NOTE(review): catches buf_avail wrapping past zero — assumes the
	 * field is unsigned; confirm against struct isp1362_ep_queue */
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
 224
/*
 * Return the buffer blocks held by @ep to queue @epq.
 * The released slots are also flagged in skip_map so the controller will
 * not process the now-stale PTDs.  Must only be called for an endpoint
 * that currently owns a claim (ep->num_ptds != 0, checked by callers).
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int last = ep->ptd_index + ep->num_ptds;

	/* dump full state before the BUG_ON below fires */
	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		    epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* invalidate the endpoint's claim so a double release is caught */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
 254
 255/*-------------------------------------------------------------------------*/
 256
/*
 * Fill in ep->ptd (and ep->data/ep->length) for the next transaction of
 * @urb on endpoint @ep, depending on the endpoint's next PID state:
 * IN/OUT data stages, control SETUP, or the final control ACK (status)
 * stage.  @fno is the frame number used for ISO PTDs only.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still to be transferred */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default data pointer: resume where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			/* control DATA stage: one packet at a time */
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			/* ISO: length and offset come from the frame descriptor */
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP stage: fixed 8-byte request, toggle always DATA0 */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* control STATUS stage: zero-length, opposite direction
		 * of the data stage, toggle always DATA1 */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK   len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		/* INT: start frame within the period, and polling rate */
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
 344
 345static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 346			      struct isp1362_ep_queue *epq)
 347{
 348	struct ptd *ptd = &ep->ptd;
 349	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
 350
 351	prefetch(ptd);
 352	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 353	if (len)
 354		isp1362_write_buffer(isp1362_hcd, ep->data,
 355				     ep->ptd_offset + PTD_HEADER_SIZE, len);
 356
 357	dump_ptd(ptd);
 358	dump_ptd_out_data(ptd, ep->data);
 359}
 360
/*
 * Read back the PTD header for @ep from chip memory, remove the endpoint
 * from the queue's active list, and for IN transfers copy the received
 * payload into ep->data.  Only the actual transfer count reported by the
 * chip is copied, so stale chip-buffer bytes never reach the caller.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* no payload to copy: OUT direction or nothing received */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			 ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
 392
/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled.
 *
 * The endpoint is queued on the HCD's remove_list; the actual buffer
 * release happens later from the SOF interrupt (see finish_unlinks()),
 * after the chip has stopped touching the PTD.
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)

{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* all slots skipped: take the whole ATL buffer out of service */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
 440
/*
 * Take done or failed requests out of schedule. Give back
 * processed urbs.
 *
 * The HCD spinlock is dropped around usb_hcd_giveback_urb() (the URB
 * completion handler may resubmit and re-enter the driver) and then
 * reacquired — hence the __releases/__acquires annotations.  If the
 * endpoint has no more queued URBs afterwards it is also descheduled.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
     __releases(isp1362_hcd->lock)
     __acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* a control endpoint starts over with SETUP for its next URB */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
			usb_pipeint(urb->pipe) ? "int" :
			usb_pipebulk(urb->pipe) ? "bulk" :
			"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule: give back the reserved bandwidth */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
 495
/*
 * Analyze transfer results, handle partial transfers and errors.
 *
 * Reads the completion code from the endpoint's PTD, advances the
 * endpoint's PID state machine (data stage -> ACK stage for control
 * transfers), accumulates urb->actual_length, and finishes the URB via
 * finish_request() once its final status is known.
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;	/* stays -EINPROGRESS while URB continues */
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	if (cc == PTD_NOTACCESSED) {
		/* chip never processed this PTD; treat as unresponsive device */
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	*/
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			/* save the data underrun error code for later and
			 * proceed with the status stage
			 */
			urb->actual_length += PTD_GET_COUNT(ptd);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* retry soft errors up to 3 times; STALL and overrun are fatal */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		/* the chip must have sent exactly what we asked for */
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* unless a trailing ZLP is still owed, we're done */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* after SETUP: go straight to status stage for zero-length
		 * requests, otherwise start the data stage with DATA1 */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		/* status stage done: the control transfer is complete */
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
 652
/*
 * Clean up all endpoints queued on the HCD's remove_list (populated by
 * remove_ptd()): release their PTD buffers, fail any still-queued URB
 * with -ESHUTDOWN, and drop them from the active and remove lists.
 * Called from the SOF interrupt, after the chip has stopped processing
 * the skipped PTDs.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			/* negative index means ISO queue: no buffers to release */
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
 686
 687static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
 688{
 689	if (count > 0) {
 690		if (count < isp1362_hcd->atl_queue.ptd_count)
 691			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
 692		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
 693		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
 694		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 695	} else
 696		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 697}
 698
/*
 * Activate the INTL (interrupt transfer) buffer: unmask the INTL
 * interrupt, mark the buffer active and write out the current skip map.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
 705
 706static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
 707{
 708	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
 709	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
 710			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
 711}
 712
/*
 * Prepare and submit one PTD for @urb on @ep to queue @epq: build the
 * PTD, claim buffer space, write it to the chip and clear its skip bit
 * so the controller will process it.
 * Returns 0 on success, -ENOMEM when the queue is full, or -EOVERFLOW
 * when no contiguous buffer run of sufficient size is available.
 */
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	int index;

	/* prepare_ptd() must run first: it sets ep->length for the claim */
	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);
	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);
	isp1362_write_ptd(isp1362_hcd, ep, epq);
	/* clearing the skip bit hands the PTD to the controller */
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}
 741
/*
 * Walk the async schedule and submit a PTD for every endpoint that is
 * not already active, until the ATL queue runs out of space.  The
 * schedule list is rotated afterwards so no endpoint is starved, and
 * the ATL buffer is (re)activated if anything was submitted or must be
 * retried on the next SOF.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			/* queue full: stop and retry next SOF */
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			/* this PTD didn't fit; a smaller one still might */
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
 794
/*
 * Walk the periodic schedule and submit a PTD for every interrupt
 * endpoint that is not already active; activate the INTL buffer if
 * anything was queued.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		/* static: only log when the submitted count changes */
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
 840
 841static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 842{
 843	u16 ptd_offset = ep->ptd_offset;
 844	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
 845
 846	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
 847	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
 848
 849	ptd_offset += num_ptds * epq->blk_size;
 850	if (ptd_offset < epq->buf_start + epq->buf_size)
 851		return ptd_offset;
 852	else
 853		return -ENOMEM;
 854}
 855
/*
 * Fill the current ISTL (ISO) buffer with PTDs for all isochronous
 * endpoints whose URBs are due in the next frame, write them to the
 * chip and activate the buffer.  The ISP1362's ISO machinery is double
 * buffered: if the other ISTL buffer is not yet full, loop back (via
 * 'fill2') and fill it for the frame after that as well.
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	/* buffer still owned by the chip: nothing to do yet */
	if (!list_empty(&epq->active))
		return;

	/* ISO PTDs are packed linearly from the start of the buffer */
	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				    __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warn("%s: req %d No more %s PTD buffers available\n",
					__func__, ep->num_req, epq->name);
				break;
			}
		}
	}
	/* write out all queued PTDs, flagging the last one for the chip */
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
 936
/*
 * Post-process completed PTDs of the ATL or INTL queue.
 *
 * @done_map: bitmask of PTD indices the controller reported done
 *            (HCATLDONE / HCINTLDONE); each handled bit is cleared.
 * @epq:      the queue (atl_queue or intl_queue) the bitmask refers to.
 *
 * For every active endpoint whose PTD index is set in @done_map the PTD
 * is read back from chip memory, its buffer space released, and the
 * endpoint post-processed (which may complete the URB).
 *
 * Must be called with isp1362_hcd->lock held. epq->finishing is raised
 * around the loop so start_iso_transfers()/start_*_transfers() back off.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	atomic_inc(&epq->finishing);
	/* _safe: postproc_ep()/release_ptd_buffers() unlink entries */
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		/* an active ep must always have a valid PTD index */
		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			/* remember the freed slot for the next allocation */
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			/* a pending unlink for this ep is now satisfied */
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* all reported PTDs handled; stop scanning early */
		if (!done_map)
			break;
	}
	/* leftover bits mean the chip reported a PTD we don't track */
	if (done_map)
		pr_warn("%s: done_map not clear: %08lx:%08lx\n",
			__func__, done_map, epq->skip_map);
	atomic_dec(&epq->finishing);
}
 982
/*
 * Post-process all PTDs of a completed ISTL buffer.
 *
 * Unlike finish_transfers() there is no done bitmask: when an ISTL
 * buffer interrupt fires, every PTD queued in that buffer is read back
 * and post-processed.
 *
 * Must be called with isp1362_hcd->lock held.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	/* keep start_iso_transfers() away while we drain the queue */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* all buffer space should have been released by postprocessing */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1006
/*
 * Top-level interrupt handler for the ISP1362.
 *
 * Reads HCuPINT, acknowledges and services each enabled interrupt source
 * (SOF, ISTL0/1 iso buffers, INTL, ATL, OHCI "OPR" events, SUSPEND,
 * CLKRDY), then re-arms HCuPINTENB.  svc_mask tracks which pending bits
 * were actually serviced so unserviced ones can be reported.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	/* the handler must never be re-entered */
	BUG_ON(isp1362_hcd->irq_active++);

	/* mask all chip interrupts while servicing */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	/* ack them in the chip; HCuPINT is write-one-to-clear per datasheet usage here */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	svc_mask = irqstat;

	if (irqstat & HCuPINT_SOF) {
		/* SOF is one-shot: re-enabled only when someone needs it again */
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		/* restart async processing unless an ATL event is pending anyway */
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* PTDs still queued: re-arm ATL irq and resume processing */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		/* an ISTL0 completion is only expected while istl_flip == 0 */
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		/* an ISTL1 completion is only expected while istl_flip == 1 */
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		/* both buffers done at once means we fell behind */
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		/* switch to the other double-buffer half */
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		/* keep the chip from re-processing the finished PTDs */
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	if (irqstat & HCuPINT_OPR) {
		/* "OPR" multiplexes the OHCI-level interrupt sources */
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* cache root-hub state for hub_status_data()/hub_control() */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		/* ack the serviced OHCI-level bits */
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	/* anything left in svc_mask fired but has no handler above */
	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* re-enable the (possibly updated) interrupt set */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
1180
1181/*-------------------------------------------------------------------------*/
1182
1183#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
1184static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1185{
1186	int i, branch = -ENOSPC;
1187
1188	/* search for the least loaded schedule branch of that interval
1189	 * which has enough bandwidth left unreserved.
1190	 */
1191	for (i = 0; i < interval; i++) {
1192		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1193			int j;
1194
1195			for (j = i; j < PERIODIC_SIZE; j += interval) {
1196				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1197					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1198					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1199					break;
1200				}
1201			}
1202			if (j < PERIODIC_SIZE)
1203				continue;
1204			branch = i;
1205		}
1206	}
1207	return branch;
1208}
1209
1210/* NB! ALL the code above this point runs with isp1362_hcd->lock
1211   held, irqs off
1212*/
1213
1214/*-------------------------------------------------------------------------*/
1215
/*
 * hc_driver->urb_enqueue: accept an URB for processing.
 *
 * Allocates the per-endpoint state on first use, links the URB, puts
 * the endpoint into the matching schedule list (async / periodic /
 * isoc) and kicks the corresponding transfer engine.
 *
 * Returns 0 on success or a negative errno; -ENOSPC for isochronous
 * URBs, which this driver rejects up front.
 * NOTE(review): because of that early rejection the PIPE_ISOCHRONOUS
 * branches further down appear unreachable from here — presumably kept
 * from an earlier state of the driver; confirm before relying on them.
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
			usb_pipeint(pipe) ? "int" :
			usb_pipebulk(pipe) ? "bulk" :
			"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		/* ep is NULL here if hep->hcpriv existed; kfree(NULL) is fine */
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	if (hep->hcpriv) {
		/* endpoint already initialized by an earlier URB */
		ep = hep->hcpriv;
	} else {
		/* first URB for this endpoint: set up the per-ep state */
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			/* branch == PERIODIC_SIZE marks "not yet scheduled" */
			ep->branch = PERIODIC_SIZE;
			/* bus time in usecs this ep consumes per serviced frame */
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     type == PIPE_ISOCHRONOUS,
						     usb_maxpacket(udev, pipe)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		/* first periodic URB: reserve bandwidth on a branch */
		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* pick a start frame aligned to the interval/branch */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the matching transfer engine */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1396
/*
 * hc_driver->urb_dequeue: cancel an URB.
 *
 * If the URB is at the head of its endpoint's queue and the endpoint
 * has a PTD in flight, the PTD is only marked for removal and the
 * actual completion is left to the interrupt handler; otherwise the
 * request is finished immediately with @status.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	/* never submitted, or already given back */
	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				/* urb == NULL signals "IRQ will complete it" below */
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warn("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1448
/*
 * hc_driver->endpoint_disable: tear down the per-endpoint state.
 *
 * If a PTD is still in flight it is queued for removal, then the
 * function busy-waits (msleep poll) until the interrupt handler has
 * drained the active list before freeing the endpoint.
 * Must be called in process context (sleeps).
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* only schedule removal once per endpoint */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	/* drop the reference taken in isp1362_urb_enqueue() */
	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1478
1479static int isp1362_get_frame(struct usb_hcd *hcd)
1480{
1481	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1482	u32 fmnum;
1483	unsigned long flags;
1484
1485	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1486	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1487	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1488
1489	return (int)fmnum;
1490}
1491
1492/*-------------------------------------------------------------------------*/
1493
1494/* Adapted from ohci-hub.c */
/* Adapted from ohci-hub.c */
/*
 * hc_driver->hub_status_data: report root-hub and per-port change bits.
 *
 * Builds the standard hub status-change bitmap in buf[0] from the
 * register values cached by the interrupt handler (rhstatus/rhport),
 * bit 0 for the hub itself, bit (i+1) for port i.
 * Returns nonzero if anything changed, 0 otherwise, -ESHUTDOWN if the
 * controller is not running.
 */
static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int ports, i, changed = 0;
	unsigned long flags;

	if (!HC_IS_RUNNING(hcd->state))
		return -ESHUTDOWN;

	/* Report no status change now, if we are scheduled to be
	   called later */
	if (timer_pending(&hcd->rh_timer))
		return 0;

	ports = isp1362_hcd->rhdesca & RH_A_NDP;
	/* the ISP1362 root hub has at most two ports */
	BUG_ON(ports > 2);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* init status */
	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
		buf[0] = changed = 1;
	else
		buf[0] = 0;

	for (i = 0; i < ports; i++) {
		u32 status = isp1362_hcd->rhport[i];

		/* any of the change bits set -> flag this port */
		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
			      RH_PS_OCIC | RH_PS_PRSC)) {
			changed = 1;
			buf[0] |= 1 << (i + 1);
			continue;
		}

		/* NOTE(review): this check is a no-op — 'continue' at the end
		 * of the loop body has no effect; likely a remnant of the
		 * ohci-hub.c code this was adapted from.
		 */
		if (!(status & RH_PS_CCS))
			continue;
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return changed;
}
1535
/*
 * Fill in the root-hub descriptor from the cached HCRHDESCA value.
 *
 * Port count, power-switching characteristics and power-on-to-good
 * time are all derived from bit fields of rhdesca.
 */
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 reg = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = USB_DT_HUB;
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	/* low two bits of HCRHDESCA hold the number of ports (max 2) */
	desc->bNbrPorts = reg & 0x3;
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
						(HUB_CHAR_LPSM |
						 HUB_CHAR_COMPOUND |
						 HUB_CHAR_OCPM));
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
			desc->wHubCharacteristics);
	/* POTPGT lives in the top byte of HCRHDESCA, units of 2 ms */
	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
	/* ports removable, and legacy PortPwrCtrlMask */
	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
	desc->u.hs.DeviceRemovable[1] = ~0;

	DBG(3, "%s: exit\n", __func__);
}
1561
1562/* Adapted from ohci-hub.c */
/* Adapted from ohci-hub.c */
/*
 * hc_driver->hub_control: handle root-hub class requests.
 *
 * Maps the standard hub requests onto the chip's OHCI-style root-hub
 * registers (HCRHSTATUS, HCRHPORT1/2).  Unknown requests/features
 * answer with a "protocol stall" (-EPIPE).
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			DBG(0, "C_HUB_OVER_CURRENT\n");
			/* clear the over-current change indicator */
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case C_HUB_LOCAL_POWER:
			DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* no hub-level status bits to report */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* hub ports are 1-based; rhport[] is 0-based */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* translate the feature into the write-one-to-clear bit */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;

			break;
		case USB_PORT_FEAT_C_CONNECTION:
			DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		/* refresh the cached port status after the write */
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);

			/* keep pulsing reset until the device disconnects
			 * or USB_RESET_WIDTH expires */
			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any current reset finishes */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					udelay(500);
				}
				if (!(tmp & RH_PS_CCS))
					break;
				/* Reset lasts 10ms (claims datasheet) */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));

				/* drop the lock while sleeping */
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}

	return retval;
}
1731
1732#ifdef	CONFIG_PM
/*
 * hc_driver->bus_suspend: put the root hub into USB suspend.
 *
 * Stops all in-flight schedules (ATL/INTL/ISTL), drains any completed
 * transfers, then writes OHCI_USB_SUSPEND into HCCONTROL and verifies
 * the controller accepted it.  Returns 0 on success, -EBUSY if the
 * controller is in a state that needs a full reinit instead.
 * Sleeps; must be called in process context.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	/* rate-limit state changes (see next_statechange updates below) */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		/* suspend requested in the middle of a resume: force reset */
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		fallthrough;
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warn("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warn("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip every PTD, stop the buffers, mask chip irqs */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		/* wait (max ~2 ms) for the next SOF, i.e. frame boundary */
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* reap whatever completed before the schedules stopped */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* ack all pending OHCI-level interrupts */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* verify the controller really went to SUSPEND */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		    isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
1830
/*
 * hc_driver->bus_resume: wake the root hub out of USB suspend.
 *
 * Drives the OHCI HCFS state machine from SUSPEND through RESUME to
 * OPERATIONAL, resuming each suspended port along the way, honoring
 * the USB-mandated resume signaling and TRSMRCY recovery times.
 * Restarts the controller outright (-EBUSY path) if it lost power.
 * Sleeps; must be called in process context.
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;

	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warn("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			/* already running; nothing to do */
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:		/* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1910#else
1911#define	isp1362_bus_suspend	NULL
1912#define	isp1362_bus_resume	NULL
1913#endif
1914
1915/*-------------------------------------------------------------------------*/
1916
1917static void dump_irq(struct seq_file *s, char *label, u16 mask)
1918{
1919	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1920		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1921		   mask & HCuPINT_SUSP ? " susp" : "",
1922		   mask & HCuPINT_OPR ? " opr" : "",
1923		   mask & HCuPINT_EOT ? " eot" : "",
1924		   mask & HCuPINT_ATL ? " atl" : "",
1925		   mask & HCuPINT_SOF ? " sof" : "");
1926}
1927
1928static void dump_int(struct seq_file *s, char *label, u32 mask)
1929{
1930	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1931		   mask & OHCI_INTR_MIE ? " MIE" : "",
1932		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
1933		   mask & OHCI_INTR_FNO ? " fno" : "",
1934		   mask & OHCI_INTR_UE ? " ue" : "",
1935		   mask & OHCI_INTR_RD ? " rd" : "",
1936		   mask & OHCI_INTR_SF ? " sof" : "",
1937		   mask & OHCI_INTR_SO ? " so" : "");
1938}
1939
1940static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1941{
1942	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1943		   mask & OHCI_CTRL_RWC ? " rwc" : "",
1944		   mask & OHCI_CTRL_RWE ? " rwe" : "",
1945		   ({
1946			   char *hcfs;
1947			   switch (mask & OHCI_CTRL_HCFS) {
1948			   case OHCI_USB_OPER:
1949				   hcfs = " oper";
1950				   break;
1951			   case OHCI_USB_RESET:
1952				   hcfs = " reset";
1953				   break;
1954			   case OHCI_USB_RESUME:
1955				   hcfs = " resume";
1956				   break;
1957			   case OHCI_USB_SUSPEND:
1958				   hcfs = " suspend";
1959				   break;
1960			   default:
1961				   hcfs = " ?";
1962			   }
1963			   hcfs;
1964		   }));
1965}
1966
/* Dump every readable ISP1362 register (OHCI-compatible and chip-specific)
 * to the debugfs seq_file.  Caller must hold isp1362_hcd->lock since the
 * register accessors use the shared address/data register pair.
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* 32 bit OHCI-compatible register set */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* 16 bit ISP1362-specific configuration/status registers */
	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* reading HCDIRDATA has side effects on the chip's buffer pointer,
	 * so it is left out of the dump */
	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* INTL (interrupt transfer) buffer management registers */
	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* ATL (async transfer) buffer management registers */
	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	/* reading HCATLDONE has side effects (clears the done map), so it
	 * is left out of the dump */
	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2058
/* debugfs show callback: dump driver statistics, interrupt/control state,
 * the full register set and the three transfer schedules (async, periodic,
 * isochronous).  Register and list access happens under the HCD spinlock.
 */
static int isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);

	/* interrupt enable/status at both the chip and OHCI level */
	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-source interrupt counters (only sources that actually fired) */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	/* asynchronous (control/bulk) schedule with queued URBs per endpoint */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   char *s;
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   }
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* periodic (interrupt) schedule: branch, load and PTD slot per ep */
	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	/* isochronous schedule */
	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(isp1362);
 
 
 
 
 
 
 
 
 
 
 
2163
/* expect just one isp1362_hcd per system */
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	/* create /sys/kernel/debug/usb/isp1362, served by isp1362_show() */
	debugfs_create_file("isp1362", S_IRUGO, usb_debug_root, isp1362_hcd,
			    &isp1362_fops);
}
2170
2171static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2172{
2173	debugfs_remove(debugfs_lookup("isp1362", usb_debug_root));
2174}
2175
2176/*-------------------------------------------------------------------------*/
2177
/* Perform a software reset of the chip and wait (up to ~20ms, polling
 * every 1ms) for the OHCI host controller reset bit to self-clear.
 * Caller must hold isp1362_hcd->lock.
 */
static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	int tmp = 20;

	/* magic value required by the chip to trigger a software reset */
	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
	while (--tmp) {
		mdelay(1);
		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
			break;
	}
	if (!tmp)
		pr_err("Software reset timeout\n");
}
2192
2193static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2194{
2195	unsigned long flags;
2196
2197	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2198	__isp1362_sw_reset(isp1362_hcd);
2199	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2200}
2201
2202static int isp1362_mem_config(struct usb_hcd *hcd)
2203{
2204	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2205	unsigned long flags;
2206	u32 total;
2207	u16 istl_size = ISP1362_ISTL_BUFSIZE;
2208	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2209	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2210	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2211	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2212	u16 atl_size;
2213	int i;
2214
2215	WARN_ON(istl_size & 3);
2216	WARN_ON(atl_blksize & 3);
2217	WARN_ON(intl_blksize & 3);
2218	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2219	WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2220
2221	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2222	if (atl_buffers > 32)
2223		atl_buffers = 32;
2224	atl_size = atl_buffers * atl_blksize;
2225	total = atl_size + intl_size + istl_size;
2226	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2227	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2228		 istl_size / 2, istl_size, 0, istl_size / 2);
2229	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2230		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2231		 intl_size, istl_size);
2232	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2233		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2234		 atl_size, istl_size + intl_size);
2235	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2236		 ISP1362_BUF_SIZE - total);
2237
2238	if (total > ISP1362_BUF_SIZE) {
2239		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2240			__func__, total, ISP1362_BUF_SIZE);
2241		return -ENOMEM;
2242	}
2243
 
2244	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2245
2246	for (i = 0; i < 2; i++) {
2247		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
2248		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2249		isp1362_hcd->istl_queue[i].blk_size = 4;
2250		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2251		snprintf(isp1362_hcd->istl_queue[i].name,
2252			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2253		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2254		     isp1362_hcd->istl_queue[i].name,
2255		     isp1362_hcd->istl_queue[i].buf_start,
2256		     isp1362_hcd->istl_queue[i].buf_size);
2257	}
2258	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2259
2260	isp1362_hcd->intl_queue.buf_start = istl_size;
2261	isp1362_hcd->intl_queue.buf_size = intl_size;
2262	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2263	isp1362_hcd->intl_queue.blk_size = intl_blksize;
2264	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2265	isp1362_hcd->intl_queue.skip_map = ~0;
2266	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2267
2268	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2269			    isp1362_hcd->intl_queue.buf_size);
2270	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2271			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2272	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2273	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2274			    1 << (ISP1362_INTL_BUFFERS - 1));
2275
2276	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2277	isp1362_hcd->atl_queue.buf_size = atl_size;
2278	isp1362_hcd->atl_queue.buf_count = atl_buffers;
2279	isp1362_hcd->atl_queue.blk_size = atl_blksize;
2280	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2281	isp1362_hcd->atl_queue.skip_map = ~0;
2282	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2283
2284	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2285			    isp1362_hcd->atl_queue.buf_size);
2286	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2287			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2288	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2289	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2290			    1 << (atl_buffers - 1));
2291
2292	snprintf(isp1362_hcd->atl_queue.name,
2293		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2294	snprintf(isp1362_hcd->intl_queue.name,
2295		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2296	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2297	     isp1362_hcd->intl_queue.name,
2298	     isp1362_hcd->intl_queue.buf_start,
2299	     ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2300	     isp1362_hcd->intl_queue.buf_size);
2301	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2302	     isp1362_hcd->atl_queue.name,
2303	     isp1362_hcd->atl_queue.buf_start,
2304	     atl_buffers, isp1362_hcd->atl_queue.blk_size,
2305	     isp1362_hcd->atl_queue.buf_size);
2306
2307	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2308
2309	return 0;
2310}
2311
/* hc_driver->reset: take the chip through a hardware (board hook) or
 * software reset, then poll up to 100ms for the clock-ready interrupt
 * status bit.  Returns 0 on success, -ENODEV if the clock never comes up.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;	/* ms */
	unsigned long flags;
	int clkrdy = 0;

	pr_debug("%s:\n", __func__);

	/* prefer the board-supplied reset/clock hooks when available */
	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the clock-ready interrupt (write-1-to-clear) */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
2351
/* hc_driver->stop: mask interrupts, power down the root hub ports, reset
 * the chip and (if the board provides a hook) gate its clock.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_debug("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all chip-level interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		__isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2383
2384#ifdef CHIP_BUFFER_TEST
/* Optional self-test of the chip's buffer memory (CHIP_BUFFER_TEST):
 * write known patterns through isp1362_write_buffer() at various sizes,
 * alignments and offsets, read them back and compare.
 * Returns 0 on success, -ENODEV on a persistent mismatch; allocation
 * failure silently returns 0 (the test is best-effort).
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	/* first half: reference pattern, second half: read-back scratch */
	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (0..7 bytes) at byte alignments 0..3 */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* one full-buffer round trip */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header-sized writes at 256 different buffer offsets.
		 * NOTE(review): test_size is never changed from 0, so each
		 * iteration only exercises PTD_HEADER_SIZE bytes — TODO
		 * confirm whether a variable payload size was intended.
		 */
		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the read once before declaring failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warn("%s: memory check with offset %02x ok after second read\n",
					__func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
2475#endif
2476
/* hc_driver->start: verify the chip ID, configure hardware and root hub
 * registers from the board's platform data, partition buffer memory,
 * enable interrupts and switch the controller to the operational state.
 * Returns 0 on success or a negative errno.
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf: build HCHWCFG from the board's platform data flags */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf: power switching and power-on-to-good time */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* write once with OCPM cleared, then set, and read back the result */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* program frame interval and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2580
2581/*-------------------------------------------------------------------------*/
2582
/* USB core callbacks for this full/low-speed, memory-mapped controller */
static const struct hc_driver isp1362_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"ISP1362 Host Controller",
	.hcd_priv_size =	sizeof(struct isp1362_hcd),

	.irq =			isp1362_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/* lifecycle */
	.reset =		isp1362_hc_reset,
	.start =		isp1362_hc_start,
	.stop =			isp1362_hc_stop,

	/* transfer management */
	.urb_enqueue =		isp1362_urb_enqueue,
	.urb_dequeue =		isp1362_urb_dequeue,
	.endpoint_disable =	isp1362_endpoint_disable,

	.get_frame_number =	isp1362_get_frame,

	/* root hub */
	.hub_status_data =	isp1362_hub_status_data,
	.hub_control =		isp1362_hub_control,
	.bus_suspend =		isp1362_bus_suspend,
	.bus_resume =		isp1362_bus_resume,
};
2606
2607/*-------------------------------------------------------------------------*/
2608
/* Platform driver remove: tear down debugfs first, then unregister the
 * HCD from the USB core and drop the reference taken by usb_create_hcd().
 */
static int isp1362_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);

	remove_debug_file(isp1362_hcd);
	DBG(0, "%s: Removing HCD\n", __func__);
	usb_remove_hcd(hcd);
	DBG(0, "%s: put_hcd\n", __func__);
	usb_put_hcd(hcd);
	DBG(0, "%s: Done\n", __func__);

	return 0;
}
2623
/* Platform driver probe: map the address/data register resources, create
 * and register the HCD, and wire up the (possibly shared) interrupt with
 * trigger flags derived from the IRQ resource.  Returns 0 or a negative
 * errno; the ioremaps are devm-managed, the hcd is released on error.
 */
static int isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *data, *irq_res;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	unsigned int irq_flags = 0;

	if (usb_disabled())
		return -ENODEV;

	/* basic sanity checks first.  board-specific init logic should
	 * have initialized these three resources and probably board
	 * specific platform_data.  we don't probe for IRQs, and do only
	 * minimal sanity checking.
	 */
	if (pdev->num_resources < 3)
		return -ENODEV;

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	irq = irq_res->start;

	/* MEM resource 1: chip address register */
	addr_reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(addr_reg))
		return PTR_ERR(addr_reg);

	/* MEM resource 0: chip data register */
	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data_reg = devm_ioremap_resource(&pdev->dev, data);
	if (IS_ERR(data_reg))
		return PTR_ERR(data_reg);

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = dev_get_platdata(&pdev->dev);
#if USE_PLATFORM_DELAY
	/* the platform delay variant needs a board-supplied delay hook */
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err;
	}
#endif

	/* translate IRQ resource flags into request_irq trigger flags */
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
	if (retval != 0)
		goto err;
	device_wakeup_enable(hcd->self.controller);

	dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

 err:
	usb_put_hcd(hcd);

	return retval;
}
2711
2712#ifdef	CONFIG_PM
/* Legacy platform suspend: for PM_EVENT_FREEZE suspend the root hub via
 * the bus_suspend callback; for other events just power down the root
 * hub ports.  The resulting pm_message is recorded for isp1362_resume().
 */
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event == PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	} else {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	}
	/* only record the new power state if the suspend path succeeded */
	if (retval == 0)
		pdev->dev.power.power_state = state;
	return retval;
}
2735
2736static int isp1362_resume(struct platform_device *pdev)
2737{
2738	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2739	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2740	unsigned long flags;
2741
2742	DBG(0, "%s: Resuming\n", __func__);
2743
2744	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2745		DBG(0, "%s: Resume RH ports\n", __func__);
2746		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2747		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2748		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2749		return 0;
2750	}
2751
2752	pdev->dev.power.power_state = PMSG_ON;
2753
2754	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2755}
2756#else
2757#define	isp1362_suspend	NULL
2758#define	isp1362_resume	NULL
2759#endif
2760
2761static struct platform_driver isp1362_driver = {
2762	.probe = isp1362_probe,
2763	.remove = isp1362_remove,
2764
2765	.suspend = isp1362_suspend,
2766	.resume = isp1362_resume,
2767	.driver = {
2768		.name = hcd_name,
2769	},
2770};
2771
2772module_platform_driver(isp1362_driver);
v4.10.11
 
   1/*
   2 * ISP1362 HCD (Host Controller Driver) for USB.
   3 *
   4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
   5 *
   6 * Derived from the SL811 HCD, rewritten for ISP116x.
   7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
   8 *
   9 * Portions:
  10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
  11 * Copyright (C) 2004 David Brownell
  12 */
  13
  14/*
  15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
  16 * accesses to the address and data register.
  17 * The following timing options exist:
  18 *
  19 * 1. Configure your memory controller to add such delays if it can (the best)
  20 * 2. Implement platform-specific delay function possibly
  21 *    combined with configuring the memory controller; see
  22 *    include/linux/usb_isp1362.h for more info.
  23 * 3. Use ndelay (easiest, poorest).
  24 *
  25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
  26 * platform specific section of isp1362.h to select the appropriate variant.
  27 *
  28 * Also note that according to the Philips "ISP1362 Errata" document
  29 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
  30 * is reasserted (even with #CS deasserted) within 132ns after a
  31 * write cycle to any controller register. If the hardware doesn't
  32 * implement the recommended fix (gating the #WR with #CS) software
  33 * must ensure that no further write cycle (not necessarily to the chip!)
  34 * is issued by the CPU within this interval.
  35
  36 * For PXA25x this can be ensured by using VLIO with the maximum
  37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
  38 */
  39
  40#undef ISP1362_DEBUG
  41
  42/*
  43 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
  44 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
  45 * requests are carried out in separate frames. This will delay any SETUP
  46 * packets until the start of the next frame so that this situation is
  47 * unlikely to occur (and makes usbtest happy running with a PXA255 target
  48 * device).
  49 */
  50#undef BUGGY_PXA2XX_UDC_USBTEST
  51
  52#undef PTD_TRACE
  53#undef URB_TRACE
  54#undef VERBOSE
  55#undef REGISTERS
  56
  57/* This enables a memory test on the ISP1362 chip memory to make sure the
  58 * chip access timing is correct.
  59 */
  60#undef CHIP_BUFFER_TEST
  61
  62#include <linux/module.h>
  63#include <linux/moduleparam.h>
  64#include <linux/kernel.h>
  65#include <linux/delay.h>
  66#include <linux/ioport.h>
  67#include <linux/sched.h>
  68#include <linux/slab.h>
  69#include <linux/errno.h>
  70#include <linux/list.h>
  71#include <linux/interrupt.h>
  72#include <linux/usb.h>
  73#include <linux/usb/isp1362.h>
  74#include <linux/usb/hcd.h>
  75#include <linux/platform_device.h>
  76#include <linux/pm.h>
  77#include <linux/io.h>
  78#include <linux/bitmap.h>
  79#include <linux/prefetch.h>
  80#include <linux/debugfs.h>
  81#include <linux/seq_file.h>
  82
  83#include <asm/irq.h>
  84#include <asm/byteorder.h>
  85#include <asm/unaligned.h>
  86
/* Debug verbosity threshold used by the driver's DBG() macros. */
static int dbg_level;
#ifdef ISP1362_DEBUG
/* writable at runtime via sysfs in debug builds */
module_param(dbg_level, int, 0644);
#else
/* not exposed in sysfs otherwise (perm 0) */
module_param(dbg_level, int, 0);
#endif

#include "../core/usb.h"
#include "isp1362.h"


#define DRIVER_VERSION	"2005-04-04"
#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char hcd_name[] = "isp1362-hcd";

/* forward declarations; both are defined later in this file */
static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);
 109/*-------------------------------------------------------------------------*/
 110
 111/*
 112 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
 113 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 114 * completion.
 115 * We don't need a 'disable' counterpart, since interrupts will be disabled
 116 * only by the interrupt handler.
 117 */
 118static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
 119{
 120	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
 121		return;
 122	if (mask & ~isp1362_hcd->irqenb)
 123		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
 124	isp1362_hcd->irqenb |= mask;
 125	if (isp1362_hcd->irq_active)
 126		return;
 127	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
 128}
 129
 130/*-------------------------------------------------------------------------*/
 131
 132static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
 133						     u16 offset)
 134{
 135	struct isp1362_ep_queue *epq = NULL;
 136
 137	if (offset < isp1362_hcd->istl_queue[1].buf_start)
 138		epq = &isp1362_hcd->istl_queue[0];
 139	else if (offset < isp1362_hcd->intl_queue.buf_start)
 140		epq = &isp1362_hcd->istl_queue[1];
 141	else if (offset < isp1362_hcd->atl_queue.buf_start)
 142		epq = &isp1362_hcd->intl_queue;
 143	else if (offset < isp1362_hcd->atl_queue.buf_start +
 144		   isp1362_hcd->atl_queue.buf_size)
 145		epq = &isp1362_hcd->atl_queue;
 146
 147	if (epq)
 148		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
 149	else
 150		pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
 151
 152	return epq;
 153}
 154
 155static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 156{
 157	int offset;
 158
 159	if (index * epq->blk_size > epq->buf_size) {
 160		pr_warn("%s: Bad %s index %d(%d)\n",
 161			__func__, epq->name, index,
 162			epq->buf_size / epq->blk_size);
 163		return -EINVAL;
 164	}
 165	offset = epq->buf_start + index * epq->blk_size;
 166	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
 167
 168	return offset;
 169}
 170
 171/*-------------------------------------------------------------------------*/
 172
 173static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
 174				    int mps)
 175{
 176	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
 177
 178	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
 179	if (xfer_size < size && xfer_size % mps)
 180		xfer_size -= xfer_size % mps;
 181
 182	return xfer_size;
 183}
 184
/*
 * Reserve a contiguous run of buffer blocks in @epq large enough for a
 * PTD header plus @len payload bytes for endpoint @ep.
 *
 * Returns the first claimed block index (>= 0) on success, -ENOMEM when
 * the queue has no free blocks at all, or -EOVERFLOW when no contiguous
 * run of sufficient length exists.  On success ep->ptd_offset,
 * ep->ptd_index and ep->num_ptds are updated and the run is marked busy
 * in epq->buf_map.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	/* header shares the first block with payload, hence the +1 */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* an endpoint must not claim twice without releasing first */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
 223
/*
 * Give back the buffer blocks claimed by @ep: clear them in buf_map,
 * set them in skip_map (so the HC skips the stale PTD) and reset the
 * endpoint's PTD bookkeeping fields.
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		    epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* mark the endpoint as owning no chip buffer */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
 253
 254/*-------------------------------------------------------------------------*/
 255
 256/*
 257  Set up PTD's.
 258*/
/*
 * Build ep->ptd (and set ep->data/ep->length) for the next transaction
 * of @urb according to ep->nextpid.  @fno is the target frame number,
 * used for ISO PTDs only.  The PTD is assembled in memory here;
 * isp1362_write_ptd() copies it into the chip buffer afterwards.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB buffer still outstanding */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
			     urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP stage: fixed 8-byte request from urb->setup_packet */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* control status stage: zero-length packet in the direction
		 * opposite to the data stage, always with DATA1 toggle
		 */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK   len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
 343
 344static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 345			      struct isp1362_ep_queue *epq)
 346{
 347	struct ptd *ptd = &ep->ptd;
 348	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
 349
 350	prefetch(ptd);
 351	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 352	if (len)
 353		isp1362_write_buffer(isp1362_hcd, ep->data,
 354				     ep->ptd_offset + PTD_HEADER_SIZE, len);
 355
 356	dump_ptd(ptd);
 357	dump_ptd_out_data(ptd, ep->data);
 358}
 359
/*
 * Read back a completed PTD header from the chip into ep->ptd, take the
 * endpoint off the queue's active list, and — for IN transfers — copy
 * exactly the received byte count into ep->data.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* nothing to copy for OUT/SETUP or zero-length IN results */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
			 ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
 391
 392/*
 393 * INT PTDs will stay in the chip until data is available.
 394 * This function will remove a PTD from the chip when the URB is dequeued.
 395 * Must be called with the spinlock held and IRQs disabled
 396 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)

{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* all PTDs skipped -> deactivate the whole buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		/* all PTDs skipped -> deactivate the whole buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
 439
 440/*
 441  Take done or failed requests out of schedule. Give back
 442  processed urbs.
 443*/
/* Called with isp1362_hcd->lock held; temporarily drops it for the
 * giveback, as the __releases/__acquires annotations document.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
     __releases(isp1362_hcd->lock)
     __acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* a control endpoint restarts at the SETUP stage for its next URB */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
			usb_pipeint(urb->pipe) ? "int" :
			usb_pipebulk(urb->pipe) ? "bulk" :
			"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	/* the HCD core requires the giveback to run unlocked */
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule: return the endpoint's bandwidth */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
 494
 495/*
 496 * Analyze transfer results, handle partial transfers and errors
 497*/
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	/* stays -EINPROGRESS until the URB can be completed */
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	if (cc == PTD_NOTACCESSED) {
		/* the chip never processed this PTD; treat it like a
		 * non-responding device
		 */
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		    ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	*/
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			/* save the data underrun error code for later and
			 * proceed with the status stage
			 */
			urb->actual_length += PTD_GET_COUNT(ptd);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* give up after 3 consecutive errors, or immediately on a
		 * STALL or data overrun
		 */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			   PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* done, unless a trailing zero-length packet
				 * is still owed (URB_ZERO_PACKET)
				 */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* SETUP done: go straight to status if there is no data
		 * stage, otherwise start the data stage with DATA1
		 */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		/* status stage handshake received: the control URB is done */
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
 651
/*
 * Process the endpoints queued on remove_list by remove_ptd(): release
 * their chip buffers, complete any pending URB with -ESHUTDOWN and take
 * them off the active and remove lists.  Run from the SOF interrupt so
 * the HC has stopped touching the skipped PTDs.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
 685
 686static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
 687{
 688	if (count > 0) {
 689		if (count < isp1362_hcd->atl_queue.ptd_count)
 690			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
 691		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
 692		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
 693		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 694	} else
 695		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 696}
 697
/* Activate the INTL queue: enable its done interrupt, mark the buffer
 * active and load the current skip map.  Register order kept as-is —
 * NOTE(review): presumably order-sensitive hardware sequence; confirm
 * against the ISP1362 datasheet before rearranging.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
 704
 705static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
 706{
 707	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
 708	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
 709			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
 710}
 711
 712static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 713		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
 714{
 715	int index = epq->free_ptd;
 716
 717	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
 718	index = claim_ptd_buffers(epq, ep, ep->length);
 719	if (index == -ENOMEM) {
 720		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
 721		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
 722		return index;
 723	} else if (index == -EOVERFLOW) {
 724		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
 725		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
 726		    epq->buf_map, epq->skip_map);
 727		return index;
 728	} else
 729		BUG_ON(index < 0);
 730	list_add_tail(&ep->active, &epq->active);
 731	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
 732	    ep, ep->num_req, ep->length, &epq->active);
 733	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
 734	    ep->ptd_offset, ep, ep->num_req);
 735	isp1362_write_ptd(isp1362_hcd, ep, epq);
 736	__clear_bit(ep->ptd_index, &epq->skip_map);
 737
 738	return 0;
 739}
 740
/*
 * Walk the async schedule and submit a PTD for every idle endpoint.
 * Keeps per-queue statistics and rotates the schedule list head to
 * avoid starving endpoints at the back of the list.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	/* set when a submission failed; retried via SOF interrupt */
	int defer = 0;

	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			/* no buffers at all left: stop scanning */
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			/* this PTD didn't fit; a smaller one still might */
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
 793
/*
 * Walk the periodic (interrupt-transfer) schedule and submit a PTD for
 * every idle endpoint, then activate the INTL queue if anything was
 * submitted.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		/* debug aid: log only when the submission count changes */
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
 839
 840static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 841{
 842	u16 ptd_offset = ep->ptd_offset;
 843	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
 844
 845	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
 846	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
 847
 848	ptd_offset += num_ptds * epq->blk_size;
 849	if (ptd_offset < epq->buf_start + epq->buf_size)
 850		return ptd_offset;
 851	else
 852		return -ENOMEM;
 853}
 854
/*
 * Fill the current ISTL buffer (and, if the chip has already consumed
 * it, the other one as well — see the goto fill2 loop) with ISO PTDs
 * that are due in the next frame.  URBs whose time window has fully
 * elapsed are completed with -EOVERFLOW.
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	if (!list_empty(&epq->active))
		return;

	/* ISO PTDs are packed back-to-back from the start of the buffer */
	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				    __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warn("%s: req %d No more %s PTD buffers available\n",
					__func__, ep->num_req, epq->name);
				break;
			}
		}
	}
	/* write out all queued PTDs, flagging the last one for the HC */
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
 935
/*
 * Retire completed PTDs of an ATL/INTL queue.  For every bit set in
 * done_map, read back the PTD from chip memory, free its buffer space,
 * take the endpoint off the queue's active (and, if pending, remove)
 * list and post-process it.  Called from the interrupt handler and from
 * bus suspend with isp1362_hcd->lock held.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	/* flag the queue so concurrent submission paths back off (see the
	 * atomic_read(&epq->finishing) checks in the start_* functions) */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		/* each done bit is consumed exactly once */
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			/* an endpoint queued for unlinking is finished now */
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* stop early once every done bit has been consumed */
		if (!done_map)
			break;
	}
	/* leftover bits mean the chip reported a PTD we don't have active */
	if (done_map)
		pr_warn("%s: done_map not clear: %08lx:%08lx\n",
			__func__, done_map, epq->skip_map);
	atomic_dec(&epq->finishing);
}
 981
/*
 * Retire all PTDs of one ISTL buffer after the chip has signalled it
 * done: read each PTD back from chip memory and post-process its
 * endpoint.  Unlike finish_transfers() there is no done_map -- the
 * whole buffer completes at once.  Runs with isp1362_hcd->lock held.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	/* keep start_iso_transfers() away while we empty the queue */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* NOTE(review): sanity check expects blk_size == 0 for ISTL queues
	 * at this point -- confirm against the queue setup code, which is
	 * not visible here */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1005
/*
 * Interrupt handler.  Masks all chip interrupts (HCuPINTENB = 0) while
 * servicing, acknowledges the enabled sources it saw, dispatches each
 * one, and finally re-enables the (possibly modified) interrupt set.
 * svc_mask tracks sources that fired but were not serviced.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;

	spin_lock(&isp1362_hcd->lock);

	/* the handler must never be re-entered; irq_active is the canary */
	BUG_ON(isp1362_hcd->irq_active++);

	/* mask all chip interrupts while we service this one */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	svc_mask = irqstat;

	/* start-of-frame: used to kick deferred unlink and ATL processing */
	if (irqstat & HCuPINT_SOF) {
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* active ATL PTDs pending: re-arm the ATL irq
				 * and let the chip resume processing them */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	/* ISTL buffer 0 complete */
	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		/* istl_flip must point at the buffer that just finished */
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	/* ISTL buffer 1 complete */
	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	/* only one ISTL buffer may complete per interrupt; finish it,
	 * refill, and flip to the other buffer */
	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	/* interrupt-transfer PTDs done */
	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		/* skip the completed PTDs until they are re-submitted */
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	/* async (control/bulk) PTDs done */
	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	/* OHCI-level (operational register) interrupt */
	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		/* root hub status change: cache root hub state for the
		 * hub_status_data/hub_control paths */
		if (intstat & OHCI_INTR_RHSC) {
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		/* ack the serviced OHCI-level interrupts */
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* re-enable the (possibly updated) interrupt set */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
1179
1180/*-------------------------------------------------------------------------*/
1181
1182#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
1183static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1184{
1185	int i, branch = -ENOSPC;
1186
1187	/* search for the least loaded schedule branch of that interval
1188	 * which has enough bandwidth left unreserved.
1189	 */
1190	for (i = 0; i < interval; i++) {
1191		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1192			int j;
1193
1194			for (j = i; j < PERIODIC_SIZE; j += interval) {
1195				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1196					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1197					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1198					break;
1199				}
1200			}
1201			if (j < PERIODIC_SIZE)
1202				continue;
1203			branch = i;
1204		}
1205	}
1206	return branch;
1207}
1208
1209/* NB! ALL the code above this point runs with isp1362_hcd->lock
1210   held, irqs off
1211*/
1212
1213/*-------------------------------------------------------------------------*/
1214
/*
 * hc_driver::urb_enqueue.  Allocates (outside the lock) a driver
 * endpoint on first use, links the URB, schedules the endpoint in the
 * async or periodic schedule and kicks transfer processing.
 * Isochronous URBs are rejected up front (-ENOSPC), even though some
 * PIPE_ISOCHRONOUS cases further down are kept for completeness.
 */
static int isp1362_urb_enqueue(struct usb_hcd *hcd,
			       struct urb *urb,
			       gfp_t mem_flags)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_device *udev = urb->dev;
	unsigned int pipe = urb->pipe;
	int is_out = !usb_pipein(pipe);
	int type = usb_pipetype(pipe);
	int epnum = usb_pipeendpoint(pipe);
	struct usb_host_endpoint *hep = urb->ep;
	struct isp1362_ep *ep = NULL;
	unsigned long flags;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	if (type == PIPE_ISOCHRONOUS) {
		pr_err("Isochronous transfers not supported\n");
		return -ENOSPC;
	}

	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
		usb_pipedevice(pipe), epnum,
		is_out ? "out" : "in",
		usb_pipecontrol(pipe) ? "ctrl" :
			usb_pipeint(pipe) ? "int" :
			usb_pipebulk(pipe) ? "bulk" :
			"iso",
		urb->transfer_buffer_length,
		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "");

	/* avoid all allocations within spinlocks: request or endpoint */
	if (!hep->hcpriv) {
		ep = kzalloc(sizeof *ep, mem_flags);
		if (!ep)
			return -ENOMEM;
	}
	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* don't submit to a dead or disabled port */
	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
	      USB_PORT_STAT_ENABLE) ||
	    !HC_IS_RUNNING(hcd->state)) {
		kfree(ep);
		retval = -ENODEV;
		goto fail_not_linked;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval) {
		kfree(ep);
		goto fail_not_linked;
	}

	/* first URB on this endpoint: initialize the driver endpoint;
	 * otherwise reuse the one cached in hep->hcpriv */
	if (hep->hcpriv) {
		ep = hep->hcpriv;
	} else {
		INIT_LIST_HEAD(&ep->schedule);
		INIT_LIST_HEAD(&ep->active);
		INIT_LIST_HEAD(&ep->remove_list);
		ep->udev = usb_get_dev(udev);
		ep->hep = hep;
		ep->epnum = epnum;
		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
		ep->ptd_offset = -EINVAL;
		ep->ptd_index = -EINVAL;
		usb_settoggle(udev, epnum, is_out, 0);

		if (type == PIPE_CONTROL)
			ep->nextpid = USB_PID_SETUP;
		else if (is_out)
			ep->nextpid = USB_PID_OUT;
		else
			ep->nextpid = USB_PID_IN;

		switch (type) {
		case PIPE_ISOCHRONOUS:
		case PIPE_INTERRUPT:
			if (urb->interval > PERIODIC_SIZE)
				urb->interval = PERIODIC_SIZE;
			ep->interval = urb->interval;
			/* PERIODIC_SIZE here means "no branch assigned yet" */
			ep->branch = PERIODIC_SIZE;
			ep->load = usb_calc_bus_time(udev->speed, !is_out,
						     (type == PIPE_ISOCHRONOUS),
						     usb_maxpacket(udev, pipe, is_out)) / 1000;
			break;
		}
		hep->hcpriv = ep;
	}
	/* request serial number, for debug output only */
	ep->num_req = isp1362_hcd->req_serial++;

	/* maybe put endpoint into schedule */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		if (list_empty(&ep->schedule)) {
			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
				__func__, ep, ep->num_req);
			list_add_tail(&ep->schedule, &isp1362_hcd->async);
		}
		break;
	case PIPE_ISOCHRONOUS:
	case PIPE_INTERRUPT:
		urb->interval = ep->interval;

		/* urb submitted for already existing EP */
		if (ep->branch < PERIODIC_SIZE)
			break;

		/* reserve periodic bandwidth on the least loaded branch */
		retval = balance(isp1362_hcd, ep->interval, ep->load);
		if (retval < 0) {
			pr_err("%s: balance returned %d\n", __func__, retval);
			goto fail;
		}
		ep->branch = retval;
		retval = 0;
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
		    __func__, isp1362_hcd->fmindex, ep->branch,
		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
		     ~(PERIODIC_SIZE - 1)) + ep->branch,
		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);

		if (list_empty(&ep->schedule)) {
			if (type == PIPE_ISOCHRONOUS) {
				/* choose a start frame aligned to the interval,
				 * at least 8 frames in the future */
				u16 frame = isp1362_hcd->fmindex;

				frame += max_t(u16, 8, ep->interval);
				frame &= ~(ep->interval - 1);
				frame |= ep->branch;
				if (frame_before(frame, isp1362_hcd->fmindex))
					frame += ep->interval;
				urb->start_frame = frame;

				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
			} else {
				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
			}
		} else
			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);

		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] + ep->load);
		isp1362_hcd->load[ep->branch] += ep->load;
	}

	urb->hcpriv = hep;
	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);

	/* kick the matching transfer engine */
	switch (type) {
	case PIPE_CONTROL:
	case PIPE_BULK:
		start_atl_transfers(isp1362_hcd);
		break;
	case PIPE_INTERRUPT:
		start_intl_transfers(isp1362_hcd);
		break;
	case PIPE_ISOCHRONOUS:
		start_iso_transfers(isp1362_hcd);
		break;
	default:
		BUG();
	}
 fail:
	if (retval)
		usb_hcd_unlink_urb_from_ep(hcd, urb);


 fail_not_linked:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (retval)
		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
	return retval;
}
1395
/*
 * hc_driver::urb_dequeue.  If the URB heads its endpoint's queue and
 * has a PTD active in the chip, it can only be queued for removal
 * (remove_ptd) and is finished later from interrupt context; 'urb' is
 * set to NULL to flag that case.  Otherwise it is finished right here.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				/* defer giveback to the interrupt handler */
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warn("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1447
/*
 * hc_driver::endpoint_disable.  Queues any still-active PTD of the
 * endpoint for removal, waits (polling, may sleep) for the interrupt
 * handler to clear the active list, then frees the driver endpoint.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* only queue for removal once */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	/* drop the reference taken in isp1362_urb_enqueue() */
	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1477
1478static int isp1362_get_frame(struct usb_hcd *hcd)
1479{
1480	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1481	u32 fmnum;
1482	unsigned long flags;
1483
1484	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1485	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1486	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1487
1488	return (int)fmnum;
1489}
1490
1491/*-------------------------------------------------------------------------*/
1492
1493/* Adapted from ohci-hub.c */
1494static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1495{
1496	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1497	int ports, i, changed = 0;
1498	unsigned long flags;
1499
1500	if (!HC_IS_RUNNING(hcd->state))
1501		return -ESHUTDOWN;
1502
1503	/* Report no status change now, if we are scheduled to be
1504	   called later */
1505	if (timer_pending(&hcd->rh_timer))
1506		return 0;
1507
1508	ports = isp1362_hcd->rhdesca & RH_A_NDP;
1509	BUG_ON(ports > 2);
1510
1511	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1512	/* init status */
1513	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1514		buf[0] = changed = 1;
1515	else
1516		buf[0] = 0;
1517
1518	for (i = 0; i < ports; i++) {
1519		u32 status = isp1362_hcd->rhport[i];
1520
1521		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1522			      RH_PS_OCIC | RH_PS_PRSC)) {
1523			changed = 1;
1524			buf[0] |= 1 << (i + 1);
1525			continue;
1526		}
1527
1528		if (!(status & RH_PS_CCS))
1529			continue;
1530	}
1531	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1532	return changed;
1533}
1534
1535static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1536				   struct usb_hub_descriptor *desc)
1537{
1538	u32 reg = isp1362_hcd->rhdesca;
1539
1540	DBG(3, "%s: enter\n", __func__);
1541
1542	desc->bDescriptorType = USB_DT_HUB;
1543	desc->bDescLength = 9;
1544	desc->bHubContrCurrent = 0;
1545	desc->bNbrPorts = reg & 0x3;
1546	/* Power switching, device type, overcurrent. */
1547	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
1548						(HUB_CHAR_LPSM |
1549						 HUB_CHAR_COMPOUND |
1550						 HUB_CHAR_OCPM));
1551	DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
1552			desc->wHubCharacteristics);
1553	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1554	/* ports removable, and legacy PortPwrCtrlMask */
1555	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1556	desc->u.hs.DeviceRemovable[1] = ~0;
1557
1558	DBG(3, "%s: exit\n", __func__);
1559}
1560
1561/* Adapted from ohci-hub.c */
1562static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1563			       u16 wIndex, char *buf, u16 wLength)
1564{
1565	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1566	int retval = 0;
1567	unsigned long flags;
1568	unsigned long t1;
1569	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1570	u32 tmp = 0;
1571
1572	switch (typeReq) {
1573	case ClearHubFeature:
1574		DBG(0, "ClearHubFeature: ");
1575		switch (wValue) {
1576		case C_HUB_OVER_CURRENT:
1577			DBG(0, "C_HUB_OVER_CURRENT\n");
1578			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1579			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1580			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
 
1581		case C_HUB_LOCAL_POWER:
1582			DBG(0, "C_HUB_LOCAL_POWER\n");
1583			break;
1584		default:
1585			goto error;
1586		}
1587		break;
1588	case SetHubFeature:
1589		DBG(0, "SetHubFeature: ");
1590		switch (wValue) {
1591		case C_HUB_OVER_CURRENT:
1592		case C_HUB_LOCAL_POWER:
1593			DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1594			break;
1595		default:
1596			goto error;
1597		}
1598		break;
1599	case GetHubDescriptor:
1600		DBG(0, "GetHubDescriptor\n");
1601		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1602		break;
1603	case GetHubStatus:
1604		DBG(0, "GetHubStatus\n");
1605		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1606		break;
1607	case GetPortStatus:
1608#ifndef VERBOSE
1609		DBG(0, "GetPortStatus\n");
1610#endif
1611		if (!wIndex || wIndex > ports)
1612			goto error;
1613		tmp = isp1362_hcd->rhport[--wIndex];
1614		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1615		break;
1616	case ClearPortFeature:
1617		DBG(0, "ClearPortFeature: ");
1618		if (!wIndex || wIndex > ports)
1619			goto error;
1620		wIndex--;
1621
1622		switch (wValue) {
1623		case USB_PORT_FEAT_ENABLE:
1624			DBG(0, "USB_PORT_FEAT_ENABLE\n");
1625			tmp = RH_PS_CCS;
1626			break;
1627		case USB_PORT_FEAT_C_ENABLE:
1628			DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1629			tmp = RH_PS_PESC;
1630			break;
1631		case USB_PORT_FEAT_SUSPEND:
1632			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1633			tmp = RH_PS_POCI;
1634			break;
1635		case USB_PORT_FEAT_C_SUSPEND:
1636			DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1637			tmp = RH_PS_PSSC;
1638			break;
1639		case USB_PORT_FEAT_POWER:
1640			DBG(0, "USB_PORT_FEAT_POWER\n");
1641			tmp = RH_PS_LSDA;
1642
1643			break;
1644		case USB_PORT_FEAT_C_CONNECTION:
1645			DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1646			tmp = RH_PS_CSC;
1647			break;
1648		case USB_PORT_FEAT_C_OVER_CURRENT:
1649			DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1650			tmp = RH_PS_OCIC;
1651			break;
1652		case USB_PORT_FEAT_C_RESET:
1653			DBG(0, "USB_PORT_FEAT_C_RESET\n");
1654			tmp = RH_PS_PRSC;
1655			break;
1656		default:
1657			goto error;
1658		}
1659
1660		spin_lock_irqsave(&isp1362_hcd->lock, flags);
1661		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1662		isp1362_hcd->rhport[wIndex] =
1663			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1664		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1665		break;
1666	case SetPortFeature:
1667		DBG(0, "SetPortFeature: ");
1668		if (!wIndex || wIndex > ports)
1669			goto error;
1670		wIndex--;
1671		switch (wValue) {
1672		case USB_PORT_FEAT_SUSPEND:
1673			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1674			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1675			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1676			isp1362_hcd->rhport[wIndex] =
1677				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1678			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1679			break;
1680		case USB_PORT_FEAT_POWER:
1681			DBG(0, "USB_PORT_FEAT_POWER\n");
1682			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1683			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1684			isp1362_hcd->rhport[wIndex] =
1685				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1686			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1687			break;
1688		case USB_PORT_FEAT_RESET:
1689			DBG(0, "USB_PORT_FEAT_RESET\n");
1690			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1691
1692			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1693			while (time_before(jiffies, t1)) {
1694				/* spin until any current reset finishes */
1695				for (;;) {
1696					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1697					if (!(tmp & RH_PS_PRS))
1698						break;
1699					udelay(500);
1700				}
1701				if (!(tmp & RH_PS_CCS))
1702					break;
1703				/* Reset lasts 10ms (claims datasheet) */
1704				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1705
1706				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1707				msleep(10);
1708				spin_lock_irqsave(&isp1362_hcd->lock, flags);
1709			}
1710
1711			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1712									 HCRHPORT1 + wIndex);
1713			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1714			break;
1715		default:
1716			goto error;
1717		}
1718		break;
1719
1720	default:
1721 error:
1722		/* "protocol stall" on error */
1723		DBG(0, "PROTOCOL STALL\n");
1724		retval = -EPIPE;
1725	}
1726
1727	return retval;
1728}
1729
1730#ifdef	CONFIG_PM
/*
 * hc_driver::bus_suspend.  Drain all transfer queues (skipping every
 * PTD and waiting up to 2ms plus a 7ms settle time), retire whatever
 * completed, then put the controller into the OHCI SUSPEND state.
 * Returns -EBUSY if the controller is in a state that needs a reinit
 * or refuses to suspend.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	/* honor the quiet period requested by the last state change */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* FALL THROUGH */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warn("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warn("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip all PTDs, mask chip irqs, arm SOF as completion flag */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		DBG(0, "%s: stopping schedules ...\n", __func__);
		/* poll up to 2ms (in 250us steps) for the next SOF */
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* retire whatever still completed while draining */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* ack all pending OHCI-level interrupts */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* verify the controller actually entered SUSPEND */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		    isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
1828
/*
 * Root hub resume handler (hc_driver->bus_resume).
 *
 * Takes the controller from the OHCI USB_SUSPEND state back to
 * USB_OPER: requests USB_RESUME, forces resume signalling on every
 * suspended root-hub port, waits the USB-mandated resume and recovery
 * times, and finally switches the HC to operational.  If the HC turns
 * out to be in the RESET state (power was lost while suspended), it is
 * stopped and restarted from scratch instead.
 *
 * Returns 0 on success, or the result of isp1362_hc_start() after a
 * full restart.
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	/* -EINPROGRESS means "continue with the port-resume sequence below" */
	int status = -EINPROGRESS;

	/* honor the quiet period requested by the last state change */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warn("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			/* already running; nothing to resume */
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:		/* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		/* power was lost: full controller restart required */
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1908#else
1909#define	isp1362_bus_suspend	NULL
1910#define	isp1362_bus_resume	NULL
1911#endif
1912
1913/*-------------------------------------------------------------------------*/
1914
1915static void dump_irq(struct seq_file *s, char *label, u16 mask)
1916{
1917	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1918		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1919		   mask & HCuPINT_SUSP ? " susp" : "",
1920		   mask & HCuPINT_OPR ? " opr" : "",
1921		   mask & HCuPINT_EOT ? " eot" : "",
1922		   mask & HCuPINT_ATL ? " atl" : "",
1923		   mask & HCuPINT_SOF ? " sof" : "");
1924}
1925
1926static void dump_int(struct seq_file *s, char *label, u32 mask)
1927{
1928	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1929		   mask & OHCI_INTR_MIE ? " MIE" : "",
1930		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
1931		   mask & OHCI_INTR_FNO ? " fno" : "",
1932		   mask & OHCI_INTR_UE ? " ue" : "",
1933		   mask & OHCI_INTR_RD ? " rd" : "",
1934		   mask & OHCI_INTR_SF ? " sof" : "",
1935		   mask & OHCI_INTR_SO ? " so" : "");
1936}
1937
1938static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1939{
1940	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1941		   mask & OHCI_CTRL_RWC ? " rwc" : "",
1942		   mask & OHCI_CTRL_RWE ? " rwe" : "",
1943		   ({
1944			   char *hcfs;
1945			   switch (mask & OHCI_CTRL_HCFS) {
1946			   case OHCI_USB_OPER:
1947				   hcfs = " oper";
1948				   break;
1949			   case OHCI_USB_RESET:
1950				   hcfs = " reset";
1951				   break;
1952			   case OHCI_USB_RESUME:
1953				   hcfs = " resume";
1954				   break;
1955			   case OHCI_USB_SUSPEND:
1956				   hcfs = " suspend";
1957				   break;
1958			   default:
1959				   hcfs = " ?";
1960			   }
1961			   hcfs;
1962		   }));
1963}
1964
/*
 * Dump all readable ISP1362 registers to the debugfs seq_file: first
 * the 32-bit OHCI-compatible register set, then the chip-specific
 * 16-bit registers and the ISTL/INTL/ATL buffer-management registers.
 * Caller must hold isp1362_hcd->lock (register accesses are not
 * otherwise serialized).  The register *names* here are macro tokens
 * consumed by the accessor macros, not plain identifiers.
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* 16-bit chip-specific registers */
	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* reading HCDIRDATA would consume data from the direct-access port */
	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	/* NOTE(review): reading HCATLDONE presumably has read side effects
	 * on the done map, hence disabled — confirm against the datasheet. */
	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2056
/*
 * debugfs show routine: dump driver statistics, interrupt state, all
 * chip registers and the current async/periodic/isochronous endpoint
 * schedules.  s->private is the struct isp1362_hcd installed by
 * debugfs_create_file().  Register and list accesses are done under
 * isp1362_hcd->lock with interrupts disabled.
 */
static int isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);

	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* per-IRQ-source counters, only printed when nonzero */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	/* async (control/bulk) schedule */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   /* note: inner 's' shadows the seq_file pointer */
				   char *s;
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   }
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* interrupt (periodic) schedule */
	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	/* isochronous schedule */
	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
2160
2161static int isp1362_open(struct inode *inode, struct file *file)
2162{
2163	return single_open(file, isp1362_show, inode);
2164}
2165
/* file_operations for the "isp1362" debugfs entry (read-only seq_file) */
static const struct file_operations debug_ops = {
	.open = isp1362_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2172
2173/* expect just one isp1362_hcd per system */
2174static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2175{
2176	isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
2177						      usb_debug_root,
2178						      isp1362_hcd, &debug_ops);
2179}
2180
/* Tear down the debugfs entry created by create_debug_file(). */
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	debugfs_remove(isp1362_hcd->debug_file);
}
2185
2186/*-------------------------------------------------------------------------*/
2187
2188static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2189{
2190	int tmp = 20;
2191
2192	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2193	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2194	while (--tmp) {
2195		mdelay(1);
2196		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2197			break;
2198	}
2199	if (!tmp)
2200		pr_err("Software reset timeout\n");
2201}
2202
/* Locked wrapper around __isp1362_sw_reset(). */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	__isp1362_sw_reset(isp1362_hcd);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2211
2212static int isp1362_mem_config(struct usb_hcd *hcd)
2213{
2214	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2215	unsigned long flags;
2216	u32 total;
2217	u16 istl_size = ISP1362_ISTL_BUFSIZE;
2218	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2219	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2220	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2221	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2222	u16 atl_size;
2223	int i;
2224
2225	WARN_ON(istl_size & 3);
2226	WARN_ON(atl_blksize & 3);
2227	WARN_ON(intl_blksize & 3);
2228	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2229	WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2230
2231	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2232	if (atl_buffers > 32)
2233		atl_buffers = 32;
2234	atl_size = atl_buffers * atl_blksize;
2235	total = atl_size + intl_size + istl_size;
2236	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2237	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2238		 istl_size / 2, istl_size, 0, istl_size / 2);
2239	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2240		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2241		 intl_size, istl_size);
2242	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2243		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2244		 atl_size, istl_size + intl_size);
2245	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2246		 ISP1362_BUF_SIZE - total);
2247
2248	if (total > ISP1362_BUF_SIZE) {
2249		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2250			__func__, total, ISP1362_BUF_SIZE);
2251		return -ENOMEM;
2252	}
2253
2254	total = istl_size + intl_size + atl_size;
2255	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2256
2257	for (i = 0; i < 2; i++) {
2258		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
2259		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2260		isp1362_hcd->istl_queue[i].blk_size = 4;
2261		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2262		snprintf(isp1362_hcd->istl_queue[i].name,
2263			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2264		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2265		     isp1362_hcd->istl_queue[i].name,
2266		     isp1362_hcd->istl_queue[i].buf_start,
2267		     isp1362_hcd->istl_queue[i].buf_size);
2268	}
2269	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2270
2271	isp1362_hcd->intl_queue.buf_start = istl_size;
2272	isp1362_hcd->intl_queue.buf_size = intl_size;
2273	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2274	isp1362_hcd->intl_queue.blk_size = intl_blksize;
2275	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2276	isp1362_hcd->intl_queue.skip_map = ~0;
2277	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2278
2279	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2280			    isp1362_hcd->intl_queue.buf_size);
2281	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2282			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2283	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2284	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2285			    1 << (ISP1362_INTL_BUFFERS - 1));
2286
2287	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2288	isp1362_hcd->atl_queue.buf_size = atl_size;
2289	isp1362_hcd->atl_queue.buf_count = atl_buffers;
2290	isp1362_hcd->atl_queue.blk_size = atl_blksize;
2291	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2292	isp1362_hcd->atl_queue.skip_map = ~0;
2293	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2294
2295	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2296			    isp1362_hcd->atl_queue.buf_size);
2297	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2298			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2299	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2300	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2301			    1 << (atl_buffers - 1));
2302
2303	snprintf(isp1362_hcd->atl_queue.name,
2304		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2305	snprintf(isp1362_hcd->intl_queue.name,
2306		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2307	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2308	     isp1362_hcd->intl_queue.name,
2309	     isp1362_hcd->intl_queue.buf_start,
2310	     ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2311	     isp1362_hcd->intl_queue.buf_size);
2312	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2313	     isp1362_hcd->atl_queue.name,
2314	     isp1362_hcd->atl_queue.buf_start,
2315	     atl_buffers, isp1362_hcd->atl_queue.blk_size,
2316	     isp1362_hcd->atl_queue.buf_size);
2317
2318	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2319
2320	return 0;
2321}
2322
/*
 * hc_driver->reset: bring the chip out of reset — via the board's
 * reset/clock hooks when provided, otherwise a software reset — and
 * poll up to 100 ms for the clock-ready (HCuPINT_CLKRDY) bit.
 * Returns 0 on success, -ENODEV if the clock never comes up.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;
	unsigned long flags;
	int clkrdy = 0;

	pr_debug("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* assert reset, start the clock, then deassert reset */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the clock-ready interrupt (write-1-to-clear) */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
2362
/*
 * hc_driver->stop: quiesce the controller — mask all chip interrupts,
 * drop port power, put the chip back into reset and stop its clock.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_debug("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all microcontroller interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		__isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2394
#ifdef CHIP_BUFFER_TEST
/*
 * Optional self-test of the chip's internal buffer memory (enabled
 * via CHIP_BUFFER_TEST): write known patterns through the data port
 * at various transfer sizes and byte offsets, read them back and
 * compare.  Returns 0 on success, -ENODEV on a persistent mismatch.
 *
 * 'ref' holds the reference pattern; 'tst' (the second half of the
 * same allocation) receives the read-back data.
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		/* fill ref with ~offset, tst with offset (distinct patterns) */
		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (0..7 bytes) at byte offsets 0..3 */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* one full-size write/read-back pass */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header-sized writes at 256 even offsets; a mismatch is
		 * retried with a second read before being treated as fatal */
		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warn("%s: memory check with offset %02x ok after second read\n",
					__func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
#endif
2487
/*
 * hc_driver->start: verify the chip ID, optionally run the buffer
 * self-test, program the hardware configuration from platform data,
 * partition the on-chip buffer memory, configure the root hub and
 * interrupt enables, then switch the controller to the OHCI
 * operational state with global port power on.
 *
 * Returns 0 on success, -ENODEV on a bad chip ID or failed self-test,
 * or the error from isp1362_mem_config().
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	/* disable DMA; this driver uses PIO only */
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		/* default power-on-to-power-good time: 25 * 2 ms units */
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* toggle OCPM, then read back what the hardware accepted */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2591
2592/*-------------------------------------------------------------------------*/
2593
/* USB core hc_driver operations implemented by this driver */
static struct hc_driver isp1362_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"ISP1362 Host Controller",
	.hcd_priv_size =	sizeof(struct isp1362_hcd),

	.irq =			isp1362_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	.reset =		isp1362_hc_reset,
	.start =		isp1362_hc_start,
	.stop =			isp1362_hc_stop,

	.urb_enqueue =		isp1362_urb_enqueue,
	.urb_dequeue =		isp1362_urb_dequeue,
	.endpoint_disable =	isp1362_endpoint_disable,

	.get_frame_number =	isp1362_get_frame,

	.hub_status_data =	isp1362_hub_status_data,
	.hub_control =		isp1362_hub_control,
	.bus_suspend =		isp1362_bus_suspend,
	.bus_resume =		isp1362_bus_resume,
};
2617
2618/*-------------------------------------------------------------------------*/
2619
/*
 * Platform-device remove: tear down the debugfs entry, unregister the
 * HCD from the USB core and drop the final reference (which frees the
 * hcd and the embedded isp1362_hcd).
 */
static int isp1362_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);

	remove_debug_file(isp1362_hcd);
	DBG(0, "%s: Removing HCD\n", __func__);
	usb_remove_hcd(hcd);
	DBG(0, "%s: put_hcd\n", __func__);
	usb_put_hcd(hcd);
	DBG(0, "%s: Done\n", __func__);

	return 0;
}
2634
/*
 * Platform-device probe: map the chip's address and data register
 * windows, allocate and initialize the HCD, register it with the USB
 * core using the board-provided IRQ (trigger flags taken from the IRQ
 * resource) and create the debugfs entry.
 */
static int isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *addr, *data, *irq_res;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	unsigned int irq_flags = 0;

	if (usb_disabled())
		return -ENODEV;

	/* basic sanity checks first.  board-specific init logic should
	 * have initialized the three resources and probably board
	 * specific platform_data.  we don't probe for IRQs, and do only
	 * minimal sanity checking.
	 */
	if (pdev->num_resources < 3)
		return -ENODEV;

	/* this driver is PIO-only; refuse DMA-capable setups */
	if (pdev->dev.dma_mask) {
		DBG(1, "won't do DMA");
		return -ENODEV;
	}

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	irq = irq_res->start;

	/* mem resource 1: the chip's address (index) register window */
	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	addr_reg = devm_ioremap_resource(&pdev->dev, addr);
	if (IS_ERR(addr_reg))
		return PTR_ERR(addr_reg);

	/* mem resource 0: the chip's data register window */
	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data_reg = devm_ioremap_resource(&pdev->dev, data);
	if (IS_ERR(data_reg))
		return PTR_ERR(data_reg);

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = dev_get_platdata(&pdev->dev);
#if USE_PLATFORM_DELAY
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err;
	}
#endif

	/* map IRQ resource trigger flags to request_irq() flags */
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
	if (retval != 0)
		goto err;
	device_wakeup_enable(hcd->self.controller);

	dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

 err:
	usb_put_hcd(hcd);

	return retval;
}
2728
2729#ifdef	CONFIG_PM
2730static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2731{
2732	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2733	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2734	unsigned long flags;
2735	int retval = 0;
2736
2737	DBG(0, "%s: Suspending device\n", __func__);
2738
2739	if (state.event == PM_EVENT_FREEZE) {
2740		DBG(0, "%s: Suspending root hub\n", __func__);
2741		retval = isp1362_bus_suspend(hcd);
2742	} else {
2743		DBG(0, "%s: Suspending RH ports\n", __func__);
2744		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2745		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2746		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2747	}
2748	if (retval == 0)
2749		pdev->dev.power.power_state = state;
2750	return retval;
2751}
2752
2753static int isp1362_resume(struct platform_device *pdev)
2754{
2755	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2756	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2757	unsigned long flags;
2758
2759	DBG(0, "%s: Resuming\n", __func__);
2760
2761	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2762		DBG(0, "%s: Resume RH ports\n", __func__);
2763		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2764		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2765		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2766		return 0;
2767	}
2768
2769	pdev->dev.power.power_state = PMSG_ON;
2770
2771	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2772}
2773#else
2774#define	isp1362_suspend	NULL
2775#define	isp1362_resume	NULL
2776#endif
2777
/*
 * Platform driver glue.  Uses the legacy suspend/resume callbacks
 * (not dev_pm_ops); when CONFIG_PM is disabled these are defined
 * to NULL above, so the fields are simply unset.
 */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = isp1362_remove,

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = hcd_name,
	},
};

/* registers the driver at module init and unregisters it at exit */
module_platform_driver(isp1362_driver);