   1/*
   2 * ISP1362 HCD (Host Controller Driver) for USB.
   3 *
   4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
   5 *
   6 * Derived from the SL811 HCD, rewritten for ISP116x.
   7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
   8 *
   9 * Portions:
  10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
  11 * Copyright (C) 2004 David Brownell
  12 */
  13
  14/*
  15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
  16 * accesses to the address and data register.
  17 * The following timing options exist:
  18 *
  19 * 1. Configure your memory controller to add such delays if it can (the best)
   20 * 2. Implement a platform-specific delay function, possibly
   21 *    combined with configuring the memory controller; see
   22 *    include/linux/usb/isp1362.h for more info.
  23 * 3. Use ndelay (easiest, poorest).
  24 *
  25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
  26 * platform specific section of isp1362.h to select the appropriate variant.
  27 *
  28 * Also note that according to the Philips "ISP1362 Errata" document
   29 * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
  30 * is reasserted (even with #CS deasserted) within 132ns after a
  31 * write cycle to any controller register. If the hardware doesn't
  32 * implement the recommended fix (gating the #WR with #CS) software
  33 * must ensure that no further write cycle (not necessarily to the chip!)
  34 * is issued by the CPU within this interval.
   35 *
  36 * For PXA25x this can be ensured by using VLIO with the maximum
  37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
  38 */
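/*
 * A minimal sketch of variant 2 above, as a board support file might
 * provide it. The authoritative isp1362_platform_data layout and the
 * prototype of its "delay" hook are in include/linux/usb/isp1362.h;
 * the function name below is made up for illustration. In the simplest
 * case the hook just falls back to variant 3:
 *
 *	static void example_isp1362_delay(struct device *dev, unsigned int ns)
 *	{
 *		ndelay(ns);
 *	}
 */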
  39
  40#ifdef CONFIG_USB_DEBUG
  41# define ISP1362_DEBUG
  42#else
  43# undef ISP1362_DEBUG
  44#endif
  45
  46/*
  47 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
  48 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
   49 * requests are carried out in separate frames. Defining the flag below
   50 * delays any SETUP packet until the start of the next frame, so that this
   51 * situation is unlikely to occur (and makes usbtest happy when running
   52 * against a PXA255 target device).
  53 */
  54#undef BUGGY_PXA2XX_UDC_USBTEST
  55
  56#undef PTD_TRACE
  57#undef URB_TRACE
  58#undef VERBOSE
  59#undef REGISTERS
  60
  61/* This enables a memory test on the ISP1362 chip memory to make sure the
  62 * chip access timing is correct.
  63 */
  64#undef CHIP_BUFFER_TEST
  65
  66#include <linux/module.h>
  67#include <linux/moduleparam.h>
  68#include <linux/kernel.h>
  69#include <linux/delay.h>
  70#include <linux/ioport.h>
  71#include <linux/sched.h>
  72#include <linux/slab.h>
  73#include <linux/errno.h>
  74#include <linux/init.h>
  75#include <linux/list.h>
  76#include <linux/interrupt.h>
  77#include <linux/usb.h>
  78#include <linux/usb/isp1362.h>
  79#include <linux/usb/hcd.h>
  80#include <linux/platform_device.h>
  81#include <linux/pm.h>
  82#include <linux/io.h>
  83#include <linux/bitmap.h>
  84#include <linux/prefetch.h>
  85
  86#include <asm/irq.h>
  87#include <asm/system.h>
  88#include <asm/byteorder.h>
  89#include <asm/unaligned.h>
  90
  91static int dbg_level;
  92#ifdef ISP1362_DEBUG
  93module_param(dbg_level, int, 0644);
  94#else
  95module_param(dbg_level, int, 0);
  96#define	STUB_DEBUG_FILE
  97#endif
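/*
 * With ISP1362_DEBUG defined, dbg_level can also be changed at run time,
 * e.g. (assuming the module is built as isp1362-hcd):
 *
 *	modprobe isp1362-hcd dbg_level=2
 *	echo 1 > /sys/module/isp1362_hcd/parameters/dbg_level
 */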
  98
  99#include "../core/usb.h"
 100#include "isp1362.h"
 101
 102
 103#define DRIVER_VERSION	"2005-04-04"
 104#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"
 105
 106MODULE_DESCRIPTION(DRIVER_DESC);
 107MODULE_LICENSE("GPL");
 108
 109static const char hcd_name[] = "isp1362-hcd";
 110
 111static void isp1362_hc_stop(struct usb_hcd *hcd);
 112static int isp1362_hc_start(struct usb_hcd *hcd);
 113
 114/*-------------------------------------------------------------------------*/
 115
 116/*
  117 * When called from the interrupt handler, only isp1362_hcd->irqenb is modified,
 118 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 119 * completion.
 120 * We don't need a 'disable' counterpart, since interrupts will be disabled
 121 * only by the interrupt handler.
 122 */
 123static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
 124{
 125	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
 126		return;
 127	if (mask & ~isp1362_hcd->irqenb)
 128		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
 129	isp1362_hcd->irqenb |= mask;
 130	if (isp1362_hcd->irq_active)
 131		return;
 132	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
 133}
 134
 135/*-------------------------------------------------------------------------*/
 136
 137static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
 138						     u16 offset)
 139{
 140	struct isp1362_ep_queue *epq = NULL;
 141
 142	if (offset < isp1362_hcd->istl_queue[1].buf_start)
 143		epq = &isp1362_hcd->istl_queue[0];
 144	else if (offset < isp1362_hcd->intl_queue.buf_start)
 145		epq = &isp1362_hcd->istl_queue[1];
 146	else if (offset < isp1362_hcd->atl_queue.buf_start)
 147		epq = &isp1362_hcd->intl_queue;
 148	else if (offset < isp1362_hcd->atl_queue.buf_start +
 149		   isp1362_hcd->atl_queue.buf_size)
 150		epq = &isp1362_hcd->atl_queue;
 151
 152	if (epq)
 153		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
 154	else
 155		pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
 156
 157	return epq;
 158}
 159
 160static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 161{
 162	int offset;
 163
 164	if (index * epq->blk_size > epq->buf_size) {
 165		pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
 166		     epq->buf_size / epq->blk_size);
 167		return -EINVAL;
 168	}
 169	offset = epq->buf_start + index * epq->blk_size;
 170	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
 171
 172	return offset;
 173}
 174
 175/*-------------------------------------------------------------------------*/
 176
 177static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
 178				    int mps)
 179{
 180	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
 181
 182	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
 183	if (xfer_size < size && xfer_size % mps)
 184		xfer_size -= xfer_size % mps;
 185
 186	return xfer_size;
 187}
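/*
 * Worked example (assuming the usual 8 byte PTD header): with a block
 * size of 64, two free blocks and mps 64, a 200 byte request is first
 * capped to 2 * 64 - 8 = 120 bytes and, since that is a partial
 * transfer, rounded down to a packet boundary, i.e. 64 bytes.
 */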
 188
 189static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
 190			     struct isp1362_ep *ep, u16 len)
 191{
 192	int ptd_offset = -EINVAL;
 193	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
 194	int found;
 195
 196	BUG_ON(len > epq->buf_size);
 197
 198	if (!epq->buf_avail)
 199		return -ENOMEM;
 200
 201	if (ep->num_ptds)
 202		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
 203		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
 204	BUG_ON(ep->num_ptds != 0);
 205
 206	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
 207						num_ptds, 0);
 208	if (found >= epq->buf_count)
 209		return -EOVERFLOW;
 210
 211	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
 212	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
 213	ptd_offset = get_ptd_offset(epq, found);
 214	WARN_ON(ptd_offset < 0);
 215	ep->ptd_offset = ptd_offset;
 216	ep->num_ptds += num_ptds;
 217	epq->buf_avail -= num_ptds;
 218	BUG_ON(epq->buf_avail > epq->buf_count);
 219	ep->ptd_index = found;
 220	bitmap_set(&epq->buf_map, found, num_ptds);
 221	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
 222	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
 223	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
 224
 225	return found;
 226}
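/*
 * Example (again assuming an 8 byte PTD header and 64 byte blocks):
 * a 120 byte transfer needs ((120 + 8 - 1) / 64) + 1 = 2 blocks, which
 * are reserved as a contiguous run of bits in buf_map; the index of the
 * first reserved block is returned.
 */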
 227
 228static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 229{
 230	int last = ep->ptd_index + ep->num_ptds;
 231
 232	if (last > epq->buf_count)
 233		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
 234		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
 235		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
 236		    epq->buf_map, epq->skip_map);
 237	BUG_ON(last > epq->buf_count);
 238
 239	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
 240	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
 241	epq->buf_avail += ep->num_ptds;
 242	epq->ptd_count--;
 243
 244	BUG_ON(epq->buf_avail > epq->buf_count);
 245	BUG_ON(epq->ptd_count > epq->buf_count);
 246
 247	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
 248	    __func__, epq->name,
 249	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
 250	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
 251	    epq->buf_map, epq->skip_map);
 252
 253	ep->num_ptds = 0;
 254	ep->ptd_offset = -EINVAL;
 255	ep->ptd_index = -EINVAL;
 256}
 257
 258/*-------------------------------------------------------------------------*/
 259
 260/*
  261  Set up PTDs.
 262*/
 263static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 264			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
 265			u16 fno)
 266{
 267	struct ptd *ptd;
 268	int toggle;
 269	int dir;
 270	u16 len;
 271	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
 272
 273	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
 274
 275	ptd = &ep->ptd;
 276
 277	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
 278
 279	switch (ep->nextpid) {
 280	case USB_PID_IN:
 281		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
 282		dir = PTD_DIR_IN;
 283		if (usb_pipecontrol(urb->pipe)) {
 284			len = min_t(size_t, ep->maxpacket, buf_len);
 285		} else if (usb_pipeisoc(urb->pipe)) {
 286			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
 287			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
 288		} else
 289			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 290		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 291		    (int)buf_len);
 292		break;
 293	case USB_PID_OUT:
 294		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
 295		dir = PTD_DIR_OUT;
 296		if (usb_pipecontrol(urb->pipe))
 297			len = min_t(size_t, ep->maxpacket, buf_len);
 298		else if (usb_pipeisoc(urb->pipe))
 299			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
 300		else
 301			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 302		if (len == 0)
 303			pr_info("%s: Sending ZERO packet: %d\n", __func__,
 304			     urb->transfer_flags & URB_ZERO_PACKET);
 305		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 306		    (int)buf_len);
 307		break;
 308	case USB_PID_SETUP:
 309		toggle = 0;
 310		dir = PTD_DIR_SETUP;
 311		len = sizeof(struct usb_ctrlrequest);
 312		DBG(1, "%s: SETUP len %d\n", __func__, len);
 313		ep->data = urb->setup_packet;
 314		break;
 315	case USB_PID_ACK:
 316		toggle = 1;
 317		len = 0;
 318		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
 319			PTD_DIR_OUT : PTD_DIR_IN;
 320		DBG(1, "%s: ACK   len %d\n", __func__, len);
 321		break;
 322	default:
 323		toggle = dir = len = 0;
 324		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
 325		BUG_ON(1);
 326	}
 327
 328	ep->length = len;
 329	if (!len)
 330		ep->data = NULL;
 331
 332	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
 333	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
 334		PTD_EP(ep->epnum);
 335	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
 336	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
 337
 338	if (usb_pipeint(urb->pipe)) {
 339		ptd->faddr |= PTD_SF_INT(ep->branch);
 340		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
 341	}
 342	if (usb_pipeisoc(urb->pipe))
 343		ptd->faddr |= PTD_SF_ISO(fno);
 344
 345	DBG(1, "%s: Finished\n", __func__);
 346}
 347
 348static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 349			      struct isp1362_ep_queue *epq)
 350{
 351	struct ptd *ptd = &ep->ptd;
 352	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
 353
 354	_BUG_ON(ep->ptd_offset < 0);
 355
 356	prefetch(ptd);
 357	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 358	if (len)
 359		isp1362_write_buffer(isp1362_hcd, ep->data,
 360				     ep->ptd_offset + PTD_HEADER_SIZE, len);
 361
 362	dump_ptd(ptd);
 363	dump_ptd_out_data(ptd, ep->data);
 364}
 365
 366static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 367			     struct isp1362_ep_queue *epq)
 368{
 369	struct ptd *ptd = &ep->ptd;
 370	int act_len;
 371
 372	WARN_ON(list_empty(&ep->active));
 373	BUG_ON(ep->ptd_offset < 0);
 374
 375	list_del_init(&ep->active);
 376	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
 377
 378	prefetchw(ptd);
 379	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 380	dump_ptd(ptd);
 381	act_len = PTD_GET_COUNT(ptd);
 382	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
 383		return;
 384	if (act_len > ep->length)
 385		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
 386			 ep->ptd_offset, act_len, ep->length);
 387	BUG_ON(act_len > ep->length);
 388	/* Only transfer the amount of data that has actually been overwritten
 389	 * in the chip buffer. We don't want any data that doesn't belong to the
  390	 * transfer to leak out of the chip to the caller's transfer buffer!
 391	 */
 392	prefetchw(ep->data);
 393	isp1362_read_buffer(isp1362_hcd, ep->data,
 394			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
 395	dump_ptd_in_data(ptd, ep->data);
 396}
 397
 398/*
 399 * INT PTDs will stay in the chip until data is available.
 400 * This function will remove a PTD from the chip when the URB is dequeued.
 401 * Must be called with the spinlock held and IRQs disabled
 402 */
 403static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 404
 405{
 406	int index;
 407	struct isp1362_ep_queue *epq;
 408
 409	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
 410	BUG_ON(ep->ptd_offset < 0);
 411
 412	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 413	BUG_ON(!epq);
 414
 415	/* put ep in remove_list for cleanup */
 416	WARN_ON(!list_empty(&ep->remove_list));
 417	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
 418	/* let SOF interrupt handle the cleanup */
 419	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 420
 421	index = ep->ptd_index;
 422	if (index < 0)
 423		/* ISO queues don't have SKIP registers */
 424		return;
 425
 426	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
 427	    index, ep->ptd_offset, epq->skip_map, 1 << index);
 428
 429	/* prevent further processing of PTD (will be effective after next SOF) */
 430	epq->skip_map |= 1 << index;
 431	if (epq == &isp1362_hcd->atl_queue) {
 432		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
 433		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
 434		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
 435		if (~epq->skip_map == 0)
 436			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 437	} else if (epq == &isp1362_hcd->intl_queue) {
 438		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
 439		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
 440		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
 441		if (~epq->skip_map == 0)
 442			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 443	}
 444}
 445
 446/*
  447  Take done or failed requests out of the schedule. Give back
  448  processed URBs.
 449*/
 450static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 451			   struct urb *urb, int status)
 452     __releases(isp1362_hcd->lock)
 453     __acquires(isp1362_hcd->lock)
 454{
 455	urb->hcpriv = NULL;
 456	ep->error_count = 0;
 457
 458	if (usb_pipecontrol(urb->pipe))
 459		ep->nextpid = USB_PID_SETUP;
 460
 461	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
 462		ep->num_req, usb_pipedevice(urb->pipe),
 463		usb_pipeendpoint(urb->pipe),
 464		!usb_pipein(urb->pipe) ? "out" : "in",
 465		usb_pipecontrol(urb->pipe) ? "ctrl" :
 466			usb_pipeint(urb->pipe) ? "int" :
 467			usb_pipebulk(urb->pipe) ? "bulk" :
 468			"iso",
 469		urb->actual_length, urb->transfer_buffer_length,
 470		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
 471		"short_ok" : "", urb->status);
 472
 473
 474	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
 475	spin_unlock(&isp1362_hcd->lock);
 476	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
 477	spin_lock(&isp1362_hcd->lock);
 478
 479	/* take idle endpoints out of the schedule right away */
 480	if (!list_empty(&ep->hep->urb_list))
 481		return;
 482
 483	/* async deschedule */
 484	if (!list_empty(&ep->schedule)) {
 485		list_del_init(&ep->schedule);
 486		return;
 487	}
 488
 489
 490	if (ep->interval) {
 491		/* periodic deschedule */
 492		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
 493		    ep, ep->branch, ep->load,
 494		    isp1362_hcd->load[ep->branch],
 495		    isp1362_hcd->load[ep->branch] - ep->load);
 496		isp1362_hcd->load[ep->branch] -= ep->load;
 497		ep->branch = PERIODIC_SIZE;
 498	}
 499}
 500
 501/*
 502 * Analyze transfer results, handle partial transfers and errors
 503*/
 504static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 505{
 506	struct urb *urb = get_urb(ep);
 507	struct usb_device *udev;
 508	struct ptd *ptd;
 509	int short_ok;
 510	u16 len;
 511	int urbstat = -EINPROGRESS;
 512	u8 cc;
 513
 514	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
 515
 516	udev = urb->dev;
 517	ptd = &ep->ptd;
 518	cc = PTD_GET_CC(ptd);
 519	if (cc == PTD_NOTACCESSED) {
 520		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
 521		    ep->num_req, ptd);
 522		cc = PTD_DEVNOTRESP;
 523	}
 524
 525	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
 526	len = urb->transfer_buffer_length - urb->actual_length;
 527
  528	/* Data underrun is special. For an allowed underrun we
  529	   clear the error and continue as normal. For a forbidden
  530	   underrun we finish the DATA stage immediately, while for
  531	   a control transfer we proceed to the STATUS stage
  532	   instead.
  533	*/
 534	if (cc == PTD_DATAUNDERRUN) {
 535		if (short_ok) {
 536			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
 537			    __func__, ep->num_req, short_ok ? "" : "not_",
 538			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 539			cc = PTD_CC_NOERROR;
 540			urbstat = 0;
 541		} else {
 542			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
 543			    __func__, ep->num_req,
 544			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
 545			    short_ok ? "" : "not_",
 546			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 547			if (usb_pipecontrol(urb->pipe)) {
 548				ep->nextpid = USB_PID_ACK;
 549				/* save the data underrun error code for later and
 550				 * proceed with the status stage
 551				 */
 552				urb->actual_length += PTD_GET_COUNT(ptd);
 553				BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 554
 555				if (urb->status == -EINPROGRESS)
 556					urb->status = cc_to_error[PTD_DATAUNDERRUN];
 557			} else {
 558				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
 559					      PTD_GET_TOGGLE(ptd));
 560				urbstat = cc_to_error[PTD_DATAUNDERRUN];
 561			}
 562			goto out;
 563		}
 564	}
 565
 566	if (cc != PTD_CC_NOERROR) {
 567		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
 568			urbstat = cc_to_error[cc];
 569			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
 570			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
 571			    ep->error_count);
 572		}
 573		goto out;
 574	}
 575
 576	switch (ep->nextpid) {
 577	case USB_PID_OUT:
 578		if (PTD_GET_COUNT(ptd) != ep->length)
 579			pr_err("%s: count=%d len=%d\n", __func__,
 580			   PTD_GET_COUNT(ptd), ep->length);
 581		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
 582		urb->actual_length += ep->length;
 583		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 584		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
 585		if (urb->actual_length == urb->transfer_buffer_length) {
 586			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 587			    ep->num_req, len, ep->maxpacket, urbstat);
 588			if (usb_pipecontrol(urb->pipe)) {
 589				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 590				    ep->num_req,
 591				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 592				ep->nextpid = USB_PID_ACK;
 593			} else {
 594				if (len % ep->maxpacket ||
 595				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
 596					urbstat = 0;
 597					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 598					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 599					    urbstat, len, ep->maxpacket, urb->actual_length);
 600				}
 601			}
 602		}
 603		break;
 604	case USB_PID_IN:
 605		len = PTD_GET_COUNT(ptd);
 606		BUG_ON(len > ep->length);
 607		urb->actual_length += len;
 608		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 609		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
 610		/* if transfer completed or (allowed) data underrun */
 611		if ((urb->transfer_buffer_length == urb->actual_length) ||
 612		    len % ep->maxpacket) {
 613			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 614			    ep->num_req, len, ep->maxpacket, urbstat);
 615			if (usb_pipecontrol(urb->pipe)) {
 616				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 617				    ep->num_req,
 618				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 619				ep->nextpid = USB_PID_ACK;
 620			} else {
 621				urbstat = 0;
 622				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 623				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 624				    urbstat, len, ep->maxpacket, urb->actual_length);
 625			}
 626		}
 627		break;
 628	case USB_PID_SETUP:
 629		if (urb->transfer_buffer_length == urb->actual_length) {
 630			ep->nextpid = USB_PID_ACK;
 631		} else if (usb_pipeout(urb->pipe)) {
 632			usb_settoggle(udev, 0, 1, 1);
 633			ep->nextpid = USB_PID_OUT;
 634		} else {
 635			usb_settoggle(udev, 0, 0, 1);
 636			ep->nextpid = USB_PID_IN;
 637		}
 638		break;
 639	case USB_PID_ACK:
 640		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
 641		    urbstat);
 642		WARN_ON(urbstat != -EINPROGRESS);
 643		urbstat = 0;
 644		ep->nextpid = 0;
 645		break;
 646	default:
 647		BUG_ON(1);
 648	}
 649
 650 out:
 651	if (urbstat != -EINPROGRESS) {
 652		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
 653		    ep, ep->num_req, urb, urbstat);
 654		finish_request(isp1362_hcd, ep, urb, urbstat);
 655	}
 656}
 657
 658static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
 659{
 660	struct isp1362_ep *ep;
 661	struct isp1362_ep *tmp;
 662
 663	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
 664		struct isp1362_ep_queue *epq =
 665			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 666		int index = ep->ptd_index;
 667
 668		BUG_ON(epq == NULL);
 669		if (index >= 0) {
 670			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
 671			BUG_ON(ep->num_ptds == 0);
 672			release_ptd_buffers(epq, ep);
 673		}
 674		if (!list_empty(&ep->hep->urb_list)) {
 675			struct urb *urb = get_urb(ep);
 676
 677			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
 678			    ep->num_req, ep);
 679			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
 680		}
 681		WARN_ON(list_empty(&ep->active));
 682		if (!list_empty(&ep->active)) {
 683			list_del_init(&ep->active);
 684			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
 685		}
 686		list_del_init(&ep->remove_list);
 687		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
 688	}
 689	DBG(1, "%s: Done\n", __func__);
 690}
 691
 692static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
 693{
 694	if (count > 0) {
 695		if (count < isp1362_hcd->atl_queue.ptd_count)
 696			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
 697		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
 698		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
 699		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 700	} else
 701		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 702}
 703
 704static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 705{
 706	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
 707	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 708	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
 709}
 710
 711static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
 712{
 713	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
 714	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
 715			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
 716}
 717
 718static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 719		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
 720{
 721	int index = epq->free_ptd;
 722
 723	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
 724	index = claim_ptd_buffers(epq, ep, ep->length);
 725	if (index == -ENOMEM) {
 726		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
 727		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
 728		return index;
 729	} else if (index == -EOVERFLOW) {
 730		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
 731		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
 732		    epq->buf_map, epq->skip_map);
 733		return index;
 734	} else
 735		BUG_ON(index < 0);
 736	list_add_tail(&ep->active, &epq->active);
 737	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
 738	    ep, ep->num_req, ep->length, &epq->active);
 739	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
 740	    ep->ptd_offset, ep, ep->num_req);
 741	isp1362_write_ptd(isp1362_hcd, ep, epq);
 742	__clear_bit(ep->ptd_index, &epq->skip_map);
 743
 744	return 0;
 745}
 746
 747static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
 748{
 749	int ptd_count = 0;
 750	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
 751	struct isp1362_ep *ep;
 752	int defer = 0;
 753
 754	if (atomic_read(&epq->finishing)) {
 755		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 756		return;
 757	}
 758
 759	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
 760		struct urb *urb = get_urb(ep);
 761		int ret;
 762
 763		if (!list_empty(&ep->active)) {
 764			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
 765			continue;
 766		}
 767
 768		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
 769		    ep, ep->num_req);
 770
 771		ret = submit_req(isp1362_hcd, urb, ep, epq);
 772		if (ret == -ENOMEM) {
 773			defer = 1;
 774			break;
 775		} else if (ret == -EOVERFLOW) {
 776			defer = 1;
 777			continue;
 778		}
 779#ifdef BUGGY_PXA2XX_UDC_USBTEST
 780		defer = ep->nextpid == USB_PID_SETUP;
 781#endif
 782		ptd_count++;
 783	}
 784
  785	/* Avoid starvation of endpoints */
 786	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
 787		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
 788		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
 789	}
 790	if (ptd_count || defer)
 791		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
 792
 793	epq->ptd_count += ptd_count;
 794	if (epq->ptd_count > epq->stat_maxptds) {
 795		epq->stat_maxptds = epq->ptd_count;
 796		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
 797	}
 798}
 799
 800static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 801{
 802	int ptd_count = 0;
 803	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
 804	struct isp1362_ep *ep;
 805
 806	if (atomic_read(&epq->finishing)) {
 807		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 808		return;
 809	}
 810
 811	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
 812		struct urb *urb = get_urb(ep);
 813		int ret;
 814
 815		if (!list_empty(&ep->active)) {
 816			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
 817			    epq->name, ep);
 818			continue;
 819		}
 820
 821		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
 822		    epq->name, ep, ep->num_req);
 823		ret = submit_req(isp1362_hcd, urb, ep, epq);
 824		if (ret == -ENOMEM)
 825			break;
 826		else if (ret == -EOVERFLOW)
 827			continue;
 828		ptd_count++;
 829	}
 830
 831	if (ptd_count) {
 832		static int last_count;
 833
 834		if (ptd_count != last_count) {
 835			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
 836			last_count = ptd_count;
 837		}
 838		enable_intl_transfers(isp1362_hcd);
 839	}
 840
 841	epq->ptd_count += ptd_count;
 842	if (epq->ptd_count > epq->stat_maxptds)
 843		epq->stat_maxptds = epq->ptd_count;
 844}
 845
 846static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 847{
 848	u16 ptd_offset = ep->ptd_offset;
 849	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
 850
 851	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
 852	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
 853
 854	ptd_offset += num_ptds * epq->blk_size;
 855	if (ptd_offset < epq->buf_start + epq->buf_size)
 856		return ptd_offset;
 857	else
 858		return -ENOMEM;
 859}
 860
 861static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
 862{
 863	int ptd_count = 0;
 864	int flip = isp1362_hcd->istl_flip;
 865	struct isp1362_ep_queue *epq;
 866	int ptd_offset;
 867	struct isp1362_ep *ep;
 868	struct isp1362_ep *tmp;
 869	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
 870
 871 fill2:
 872	epq = &isp1362_hcd->istl_queue[flip];
 873	if (atomic_read(&epq->finishing)) {
 874		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 875		return;
 876	}
 877
 878	if (!list_empty(&epq->active))
 879		return;
 880
 881	ptd_offset = epq->buf_start;
 882	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
 883		struct urb *urb = get_urb(ep);
 884		s16 diff = fno - (u16)urb->start_frame;
 885
 886		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
 887
 888		if (diff > urb->number_of_packets) {
 889			/* time frame for this URB has elapsed */
 890			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
 891			continue;
 892		} else if (diff < -1) {
 893			/* URB is not due in this frame or the next one.
 894			 * Comparing with '-1' instead of '0' accounts for double
  895			 * buffering in the ISP1362, which enables us to queue the PTD
 896			 * one frame ahead of time
 897			 */
 898		} else if (diff == -1) {
  899			/* submit PTDs that are due in the next frame */
 900			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
 901			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
 902			    epq->buf_start + epq->buf_size) {
 903				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
 904				    __func__, ep->length);
 905				continue;
 906			}
 907			ep->ptd_offset = ptd_offset;
 908			list_add_tail(&ep->active, &epq->active);
 909
 910			ptd_offset = next_ptd(epq, ep);
 911			if (ptd_offset < 0) {
 912				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
 913				     ep->num_req, epq->name);
 914				break;
 915			}
 916		}
 917	}
 918	list_for_each_entry(ep, &epq->active, active) {
 919		if (epq->active.next == &ep->active)
 920			ep->ptd.mps |= PTD_LAST_MSK;
 921		isp1362_write_ptd(isp1362_hcd, ep, epq);
 922		ptd_count++;
 923	}
 924
 925	if (ptd_count)
 926		enable_istl_transfers(isp1362_hcd, flip);
 927
 928	epq->ptd_count += ptd_count;
 929	if (epq->ptd_count > epq->stat_maxptds)
 930		epq->stat_maxptds = epq->ptd_count;
 931
  932	/* check whether the second ISTL buffer may also be filled */
 933	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
 934	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
 935		fno++;
 936		ptd_count = 0;
 937		flip = 1 - flip;
 938		goto fill2;
 939	}
 940}
 941
 942static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
 943			     struct isp1362_ep_queue *epq)
 944{
 945	struct isp1362_ep *ep;
 946	struct isp1362_ep *tmp;
 947
 948	if (list_empty(&epq->active)) {
 949		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 950		return;
 951	}
 952
 953	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
 954
 955	atomic_inc(&epq->finishing);
 956	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
 957		int index = ep->ptd_index;
 958
 959		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
 960		    index, ep->ptd_offset);
 961
 962		BUG_ON(index < 0);
 963		if (__test_and_clear_bit(index, &done_map)) {
 964			isp1362_read_ptd(isp1362_hcd, ep, epq);
 965			epq->free_ptd = index;
 966			BUG_ON(ep->num_ptds == 0);
 967			release_ptd_buffers(epq, ep);
 968
 969			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
 970			    ep, ep->num_req);
 971			if (!list_empty(&ep->remove_list)) {
 972				list_del_init(&ep->remove_list);
 973				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
 974			}
 975			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
 976			    ep, ep->num_req);
 977			postproc_ep(isp1362_hcd, ep);
 978		}
 979		if (!done_map)
 980			break;
 981	}
 982	if (done_map)
 983		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
 984		     epq->skip_map);
 985	atomic_dec(&epq->finishing);
 986}
 987
 988static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
 989{
 990	struct isp1362_ep *ep;
 991	struct isp1362_ep *tmp;
 992
 993	if (list_empty(&epq->active)) {
 994		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 995		return;
 996	}
 997
 998	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
 999
1000	atomic_inc(&epq->finishing);
1001	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
1002		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
1003
1004		isp1362_read_ptd(isp1362_hcd, ep, epq);
1005		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
1006		postproc_ep(isp1362_hcd, ep);
1007	}
1008	WARN_ON(epq->blk_size != 0);
1009	atomic_dec(&epq->finishing);
1010}
1011
1012static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1013{
1014	int handled = 0;
1015	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1016	u16 irqstat;
1017	u16 svc_mask;
1018
1019	spin_lock(&isp1362_hcd->lock);
1020
1021	BUG_ON(isp1362_hcd->irq_active++);
1022
1023	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1024
1025	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1026	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1027
1028	/* only handle interrupts that are currently enabled */
1029	irqstat &= isp1362_hcd->irqenb;
1030	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1031	svc_mask = irqstat;
1032
1033	if (irqstat & HCuPINT_SOF) {
1034		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1035		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1036		handled = 1;
1037		svc_mask &= ~HCuPINT_SOF;
1038		DBG(3, "%s: SOF\n", __func__);
1039		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1040		if (!list_empty(&isp1362_hcd->remove_list))
1041			finish_unlinks(isp1362_hcd);
1042		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1043			if (list_empty(&isp1362_hcd->atl_queue.active)) {
1044				start_atl_transfers(isp1362_hcd);
1045			} else {
1046				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1047				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1048						    isp1362_hcd->atl_queue.skip_map);
1049				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1050			}
1051		}
1052	}
1053
1054	if (irqstat & HCuPINT_ISTL0) {
1055		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1056		handled = 1;
1057		svc_mask &= ~HCuPINT_ISTL0;
1058		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1059		DBG(1, "%s: ISTL0\n", __func__);
1060		WARN_ON((int)!!isp1362_hcd->istl_flip);
1061		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1062			HCBUFSTAT_ISTL0_ACTIVE);
1063		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1064			HCBUFSTAT_ISTL0_DONE));
1065		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1066	}
1067
1068	if (irqstat & HCuPINT_ISTL1) {
1069		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1070		handled = 1;
1071		svc_mask &= ~HCuPINT_ISTL1;
1072		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1073		DBG(1, "%s: ISTL1\n", __func__);
1074		WARN_ON(!(int)isp1362_hcd->istl_flip);
1075		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1076			HCBUFSTAT_ISTL1_ACTIVE);
1077		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1078			HCBUFSTAT_ISTL1_DONE));
1079		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1080	}
1081
1082	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1083		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1084			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
1085		finish_iso_transfers(isp1362_hcd,
1086				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1087		start_iso_transfers(isp1362_hcd);
1088		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1089	}
1090
1091	if (irqstat & HCuPINT_INTL) {
1092		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1093		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1094		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1095
1096		DBG(2, "%s: INTL\n", __func__);
1097
1098		svc_mask &= ~HCuPINT_INTL;
1099
1100		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1101		if (~(done_map | skip_map) == 0)
1102			/* All PTDs are finished, disable INTL processing entirely */
1103			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1104
1105		handled = 1;
1106		WARN_ON(!done_map);
1107		if (done_map) {
1108			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1109			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1110			start_intl_transfers(isp1362_hcd);
1111		}
1112	}
1113
1114	if (irqstat & HCuPINT_ATL) {
1115		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1116		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1117		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1118
1119		DBG(2, "%s: ATL\n", __func__);
1120
1121		svc_mask &= ~HCuPINT_ATL;
1122
1123		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1124		if (~(done_map | skip_map) == 0)
1125			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1126		if (done_map) {
1127			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1128			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1129			start_atl_transfers(isp1362_hcd);
1130		}
1131		handled = 1;
1132	}
1133
1134	if (irqstat & HCuPINT_OPR) {
1135		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1136		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1137
1138		svc_mask &= ~HCuPINT_OPR;
1139		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1140		intstat &= isp1362_hcd->intenb;
1141		if (intstat & OHCI_INTR_UE) {
1142			pr_err("Unrecoverable error\n");
 1143			/* FIXME: do a reset or cleanup here, or whatever is appropriate */
1144		}
1145		if (intstat & OHCI_INTR_RHSC) {
1146			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1147			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1148			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1149		}
1150		if (intstat & OHCI_INTR_RD) {
1151			pr_info("%s: RESUME DETECTED\n", __func__);
1152			isp1362_show_reg(isp1362_hcd, HCCONTROL);
1153			usb_hcd_resume_root_hub(hcd);
1154		}
1155		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1156		irqstat &= ~HCuPINT_OPR;
1157		handled = 1;
1158	}
1159
1160	if (irqstat & HCuPINT_SUSP) {
1161		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1162		handled = 1;
1163		svc_mask &= ~HCuPINT_SUSP;
1164
1165		pr_info("%s: SUSPEND IRQ\n", __func__);
1166	}
1167
1168	if (irqstat & HCuPINT_CLKRDY) {
1169		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1170		handled = 1;
1171		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1172		svc_mask &= ~HCuPINT_CLKRDY;
1173		pr_info("%s: CLKRDY IRQ\n", __func__);
1174	}
1175
1176	if (svc_mask)
1177		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1178
1179	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1180	isp1362_hcd->irq_active--;
1181	spin_unlock(&isp1362_hcd->lock);
1182
1183	return IRQ_RETVAL(handled);
1184}
1185
1186/*-------------------------------------------------------------------------*/
1187
1188#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
1189static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1190{
1191	int i, branch = -ENOSPC;
1192
1193	/* search for the least loaded schedule branch of that interval
1194	 * which has enough bandwidth left unreserved.
1195	 */
1196	for (i = 0; i < interval; i++) {
1197		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1198			int j;
1199
1200			for (j = i; j < PERIODIC_SIZE; j += interval) {
1201				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1202					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1203					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1204					break;
1205				}
1206			}
1207			if (j < PERIODIC_SIZE)
1208				continue;
1209			branch = i;
1210		}
1211	}
1212	return branch;
1213}
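/*
 * Example: with an interval of 4 and a PERIODIC_SIZE of (typically) 32,
 * the candidate branches are 0..3; balance() returns the least loaded
 * branch i whose slots i, i+4, i+8, ... can all take the additional
 * load without exceeding MAX_PERIODIC_LOAD, or -ENOSPC if none can.
 */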
1214
1215/* NB! ALL the code above this point runs with isp1362_hcd->lock
1216   held, irqs off
1217*/
1218
1219/*-------------------------------------------------------------------------*/
1220
1221static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1222			       struct urb *urb,
1223			       gfp_t mem_flags)
1224{
1225	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1226	struct usb_device *udev = urb->dev;
1227	unsigned int pipe = urb->pipe;
1228	int is_out = !usb_pipein(pipe);
1229	int type = usb_pipetype(pipe);
1230	int epnum = usb_pipeendpoint(pipe);
1231	struct usb_host_endpoint *hep = urb->ep;
1232	struct isp1362_ep *ep = NULL;
1233	unsigned long flags;
1234	int retval = 0;
1235
1236	DBG(3, "%s: urb %p\n", __func__, urb);
1237
1238	if (type == PIPE_ISOCHRONOUS) {
1239		pr_err("Isochronous transfers not supported\n");
1240		return -ENOSPC;
1241	}
1242
1243	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1244		usb_pipedevice(pipe), epnum,
1245		is_out ? "out" : "in",
1246		usb_pipecontrol(pipe) ? "ctrl" :
1247			usb_pipeint(pipe) ? "int" :
1248			usb_pipebulk(pipe) ? "bulk" :
1249			"iso",
1250		urb->transfer_buffer_length,
1251		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1252		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1253		"short_ok" : "");
1254
1255	/* avoid all allocations within spinlocks: request or endpoint */
1256	if (!hep->hcpriv) {
1257		ep = kzalloc(sizeof *ep, mem_flags);
1258		if (!ep)
1259			return -ENOMEM;
1260	}
1261	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1262
1263	/* don't submit to a dead or disabled port */
1264	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1265	      USB_PORT_STAT_ENABLE) ||
1266	    !HC_IS_RUNNING(hcd->state)) {
1267		kfree(ep);
1268		retval = -ENODEV;
1269		goto fail_not_linked;
1270	}
1271
1272	retval = usb_hcd_link_urb_to_ep(hcd, urb);
1273	if (retval) {
1274		kfree(ep);
1275		goto fail_not_linked;
1276	}
1277
1278	if (hep->hcpriv) {
1279		ep = hep->hcpriv;
1280	} else {
1281		INIT_LIST_HEAD(&ep->schedule);
1282		INIT_LIST_HEAD(&ep->active);
1283		INIT_LIST_HEAD(&ep->remove_list);
1284		ep->udev = usb_get_dev(udev);
1285		ep->hep = hep;
1286		ep->epnum = epnum;
1287		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1288		ep->ptd_offset = -EINVAL;
1289		ep->ptd_index = -EINVAL;
1290		usb_settoggle(udev, epnum, is_out, 0);
1291
1292		if (type == PIPE_CONTROL)
1293			ep->nextpid = USB_PID_SETUP;
1294		else if (is_out)
1295			ep->nextpid = USB_PID_OUT;
1296		else
1297			ep->nextpid = USB_PID_IN;
1298
1299		switch (type) {
1300		case PIPE_ISOCHRONOUS:
1301		case PIPE_INTERRUPT:
1302			if (urb->interval > PERIODIC_SIZE)
1303				urb->interval = PERIODIC_SIZE;
1304			ep->interval = urb->interval;
1305			ep->branch = PERIODIC_SIZE;
1306			ep->load = usb_calc_bus_time(udev->speed, !is_out,
1307						     (type == PIPE_ISOCHRONOUS),
1308						     usb_maxpacket(udev, pipe, is_out)) / 1000;
1309			break;
1310		}
1311		hep->hcpriv = ep;
1312	}
1313	ep->num_req = isp1362_hcd->req_serial++;
1314
1315	/* maybe put endpoint into schedule */
1316	switch (type) {
1317	case PIPE_CONTROL:
1318	case PIPE_BULK:
1319		if (list_empty(&ep->schedule)) {
1320			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1321				__func__, ep, ep->num_req);
1322			list_add_tail(&ep->schedule, &isp1362_hcd->async);
1323		}
1324		break;
1325	case PIPE_ISOCHRONOUS:
1326	case PIPE_INTERRUPT:
1327		urb->interval = ep->interval;
1328
1329		/* urb submitted for already existing EP */
1330		if (ep->branch < PERIODIC_SIZE)
1331			break;
1332
1333		retval = balance(isp1362_hcd, ep->interval, ep->load);
1334		if (retval < 0) {
1335			pr_err("%s: balance returned %d\n", __func__, retval);
1336			goto fail;
1337		}
1338		ep->branch = retval;
1339		retval = 0;
1340		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1341		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1342		    __func__, isp1362_hcd->fmindex, ep->branch,
1343		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1344		     ~(PERIODIC_SIZE - 1)) + ep->branch,
1345		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1346
1347		if (list_empty(&ep->schedule)) {
1348			if (type == PIPE_ISOCHRONOUS) {
1349				u16 frame = isp1362_hcd->fmindex;
1350
1351				frame += max_t(u16, 8, ep->interval);
1352				frame &= ~(ep->interval - 1);
1353				frame |= ep->branch;
1354				if (frame_before(frame, isp1362_hcd->fmindex))
1355					frame += ep->interval;
1356				urb->start_frame = frame;
1357
1358				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1359				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1360			} else {
1361				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1362				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1363			}
1364		} else
1365			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1366
1367		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1368		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1369		    isp1362_hcd->load[ep->branch] + ep->load);
1370		isp1362_hcd->load[ep->branch] += ep->load;
1371	}
1372
1373	urb->hcpriv = hep;
1374	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1375
1376	switch (type) {
1377	case PIPE_CONTROL:
1378	case PIPE_BULK:
1379		start_atl_transfers(isp1362_hcd);
1380		break;
1381	case PIPE_INTERRUPT:
1382		start_intl_transfers(isp1362_hcd);
1383		break;
1384	case PIPE_ISOCHRONOUS:
1385		start_iso_transfers(isp1362_hcd);
1386		break;
1387	default:
1388		BUG();
1389	}
1390 fail:
1391	if (retval)
1392		usb_hcd_unlink_urb_from_ep(hcd, urb);
1393
1394
1395 fail_not_linked:
1396	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1397	if (retval)
1398		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1399	return retval;
1400}
1401
1402static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1403{
1404	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1405	struct usb_host_endpoint *hep;
1406	unsigned long flags;
1407	struct isp1362_ep *ep;
1408	int retval = 0;
1409
1410	DBG(3, "%s: urb %p\n", __func__, urb);
1411
1412	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1413	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1414	if (retval)
1415		goto done;
1416
1417	hep = urb->hcpriv;
1418
1419	if (!hep) {
1420		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1421		return -EIDRM;
1422	}
1423
1424	ep = hep->hcpriv;
1425	if (ep) {
1426		/* In front of queue? */
1427		if (ep->hep->urb_list.next == &urb->urb_list) {
1428			if (!list_empty(&ep->active)) {
1429				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1430				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1431				/* disable processing and queue PTD for removal */
1432				remove_ptd(isp1362_hcd, ep);
1433				urb = NULL;
1434			}
1435		}
1436		if (urb) {
1437			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1438			    ep->num_req);
1439			finish_request(isp1362_hcd, ep, urb, status);
1440		} else
1441			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1442	} else {
1443		pr_warning("%s: No EP in URB %p\n", __func__, urb);
1444		retval = -EINVAL;
1445	}
1446done:
1447	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1448
1449	DBG(3, "%s: exit\n", __func__);
1450
1451	return retval;
1452}
1453
1454static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1455{
1456	struct isp1362_ep *ep = hep->hcpriv;
1457	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1458	unsigned long flags;
1459
1460	DBG(1, "%s: ep %p\n", __func__, ep);
1461	if (!ep)
1462		return;
1463	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1464	if (!list_empty(&hep->urb_list)) {
1465		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1466			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1467			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1468			remove_ptd(isp1362_hcd, ep);
1469			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1470		}
1471	}
1472	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1473	/* Wait for interrupt to clear out active list */
1474	while (!list_empty(&ep->active))
1475		msleep(1);
1476
1477	DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1478
1479	usb_put_dev(ep->udev);
1480	kfree(ep);
1481	hep->hcpriv = NULL;
1482}
1483
1484static int isp1362_get_frame(struct usb_hcd *hcd)
1485{
1486	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1487	u32 fmnum;
1488	unsigned long flags;
1489
1490	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1491	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1492	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1493
1494	return (int)fmnum;
1495}
1496
1497/*-------------------------------------------------------------------------*/
1498
1499/* Adapted from ohci-hub.c */
1500static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1501{
1502	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1503	int ports, i, changed = 0;
1504	unsigned long flags;
1505
1506	if (!HC_IS_RUNNING(hcd->state))
1507		return -ESHUTDOWN;
1508
1509	/* Report no status change now, if we are scheduled to be
1510	   called later */
1511	if (timer_pending(&hcd->rh_timer))
1512		return 0;
1513
1514	ports = isp1362_hcd->rhdesca & RH_A_NDP;
1515	BUG_ON(ports > 2);
1516
1517	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1518	/* init status */
1519	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1520		buf[0] = changed = 1;
1521	else
1522		buf[0] = 0;
1523
1524	for (i = 0; i < ports; i++) {
1525		u32 status = isp1362_hcd->rhport[i];
1526
1527		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1528			      RH_PS_OCIC | RH_PS_PRSC)) {
1529			changed = 1;
1530			buf[0] |= 1 << (i + 1);
1531			continue;
1532		}
1533
1534		if (!(status & RH_PS_CCS))
1535			continue;
1536	}
1537	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1538	return changed;
1539}
1540
1541static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1542				   struct usb_hub_descriptor *desc)
1543{
1544	u32 reg = isp1362_hcd->rhdesca;
1545
1546	DBG(3, "%s: enter\n", __func__);
1547
1548	desc->bDescriptorType = 0x29;
1549	desc->bDescLength = 9;
1550	desc->bHubContrCurrent = 0;
1551	desc->bNbrPorts = reg & 0x3;
1552	/* Power switching, device type, overcurrent. */
1553	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
1554	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
1555	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1556	/* ports removable, and legacy PortPwrCtrlMask */
1557	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1558	desc->u.hs.DeviceRemovable[1] = ~0;
1559
1560	DBG(3, "%s: exit\n", __func__);
1561}
1562
1563/* Adapted from ohci-hub.c */
1564static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1565			       u16 wIndex, char *buf, u16 wLength)
1566{
1567	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1568	int retval = 0;
1569	unsigned long flags;
1570	unsigned long t1;
1571	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1572	u32 tmp = 0;
1573
1574	switch (typeReq) {
1575	case ClearHubFeature:
1576		DBG(0, "ClearHubFeature: ");
1577		switch (wValue) {
1578		case C_HUB_OVER_CURRENT:
1579			_DBG(0, "C_HUB_OVER_CURRENT\n");
1580			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1581			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1582			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1583		case C_HUB_LOCAL_POWER:
1584			_DBG(0, "C_HUB_LOCAL_POWER\n");
1585			break;
1586		default:
1587			goto error;
1588		}
1589		break;
1590	case SetHubFeature:
1591		DBG(0, "SetHubFeature: ");
1592		switch (wValue) {
1593		case C_HUB_OVER_CURRENT:
1594		case C_HUB_LOCAL_POWER:
1595			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1596			break;
1597		default:
1598			goto error;
1599		}
1600		break;
1601	case GetHubDescriptor:
1602		DBG(0, "GetHubDescriptor\n");
1603		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1604		break;
1605	case GetHubStatus:
1606		DBG(0, "GetHubStatus\n");
1607		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1608		break;
1609	case GetPortStatus:
1610#ifndef VERBOSE
1611		DBG(0, "GetPortStatus\n");
1612#endif
1613		if (!wIndex || wIndex > ports)
1614			goto error;
1615		tmp = isp1362_hcd->rhport[--wIndex];
1616		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1617		break;
1618	case ClearPortFeature:
1619		DBG(0, "ClearPortFeature: ");
1620		if (!wIndex || wIndex > ports)
1621			goto error;
1622		wIndex--;
1623
1624		switch (wValue) {
1625		case USB_PORT_FEAT_ENABLE:
1626			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
1627			tmp = RH_PS_CCS;
1628			break;
1629		case USB_PORT_FEAT_C_ENABLE:
1630			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1631			tmp = RH_PS_PESC;
1632			break;
1633		case USB_PORT_FEAT_SUSPEND:
1634			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1635			tmp = RH_PS_POCI;
1636			break;
1637		case USB_PORT_FEAT_C_SUSPEND:
1638			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1639			tmp = RH_PS_PSSC;
1640			break;
1641		case USB_PORT_FEAT_POWER:
1642			_DBG(0, "USB_PORT_FEAT_POWER\n");
1643			tmp = RH_PS_LSDA;
1644
1645			break;
1646		case USB_PORT_FEAT_C_CONNECTION:
1647			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1648			tmp = RH_PS_CSC;
1649			break;
1650		case USB_PORT_FEAT_C_OVER_CURRENT:
1651			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1652			tmp = RH_PS_OCIC;
1653			break;
1654		case USB_PORT_FEAT_C_RESET:
1655			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
1656			tmp = RH_PS_PRSC;
1657			break;
1658		default:
1659			goto error;
1660		}
1661
1662		spin_lock_irqsave(&isp1362_hcd->lock, flags);
1663		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1664		isp1362_hcd->rhport[wIndex] =
1665			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1666		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1667		break;
1668	case SetPortFeature:
1669		DBG(0, "SetPortFeature: ");
1670		if (!wIndex || wIndex > ports)
1671			goto error;
1672		wIndex--;
1673		switch (wValue) {
1674		case USB_PORT_FEAT_SUSPEND:
1675			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1676			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1677			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1678			isp1362_hcd->rhport[wIndex] =
1679				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1680			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1681			break;
1682		case USB_PORT_FEAT_POWER:
1683			_DBG(0, "USB_PORT_FEAT_POWER\n");
1684			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1685			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1686			isp1362_hcd->rhport[wIndex] =
1687				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1688			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1689			break;
1690		case USB_PORT_FEAT_RESET:
1691			_DBG(0, "USB_PORT_FEAT_RESET\n");
1692			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1693
1694			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1695			while (time_before(jiffies, t1)) {
1696				/* spin until any current reset finishes */
1697				for (;;) {
1698					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1699					if (!(tmp & RH_PS_PRS))
1700						break;
1701					udelay(500);
1702				}
1703				if (!(tmp & RH_PS_CCS))
1704					break;
1705				/* Reset lasts 10ms (per the datasheet) */
1706				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1707
1708				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1709				msleep(10);
1710				spin_lock_irqsave(&isp1362_hcd->lock, flags);
1711			}
1712
1713			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1714									 HCRHPORT1 + wIndex);
1715			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1716			break;
1717		default:
1718			goto error;
1719		}
1720		break;
1721
1722	default:
1723 error:
1724		/* "protocol stall" on error */
1725		_DBG(0, "PROTOCOL STALL\n");
1726		retval = -EPIPE;
1727	}
1728
1729	return retval;
1730}
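
The GetPortStatus case above returns the cached HCRHPORTx value verbatim; this works because the OHCI port-status register already matches the wPortStatus/wPortChange pair the hub protocol expects, with the status bits in the low half and the change bits in the high half. A minimal sketch of how a consumer of that buffer would split the little-endian value (the local variable names are made up for illustration):

	u32 portstat = le32_to_cpu(get_unaligned((__le32 *)buf));
	u16 wPortStatus = portstat & 0xffff;	/* RH_PS_CCS, _PES, _PSS, _POCI, _PRS, _PPS, _LSDA */
	u16 wPortChange = portstat >> 16;	/* RH_PS_CSC, _PESC, _PSSC, _OCIC, _PRSC */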
1731
1732#ifdef	CONFIG_PM
1733static int isp1362_bus_suspend(struct usb_hcd *hcd)
1734{
1735	int status = 0;
1736	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1737	unsigned long flags;
1738
1739	if (time_before(jiffies, isp1362_hcd->next_statechange))
1740		msleep(5);
1741
1742	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1743
1744	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1745	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1746	case OHCI_USB_RESUME:
1747		DBG(0, "%s: resume/suspend?\n", __func__);
1748		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1749		isp1362_hcd->hc_control |= OHCI_USB_RESET;
1750		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1751		/* FALL THROUGH */
1752	case OHCI_USB_RESET:
1753		status = -EBUSY;
1754		pr_warning("%s: needs reinit!\n", __func__);
1755		goto done;
1756	case OHCI_USB_SUSPEND:
1757		pr_warning("%s: already suspended?\n", __func__);
1758		goto done;
1759	}
1760	DBG(0, "%s: suspend root hub\n", __func__);
1761
1762	/* First stop any processing */
1763	hcd->state = HC_STATE_QUIESCING;
1764	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1765	    !list_empty(&isp1362_hcd->intl_queue.active) ||
1766	    !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1767	    !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1768		int limit;
1769
1770		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1771		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1772		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1773		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1774		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1775
1776		DBG(0, "%s: stopping schedules ...\n", __func__);
1777		limit = 2000;
1778		while (limit > 0) {
1779			udelay(250);
1780			limit -= 250;
1781			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1782				break;
1783		}
1784		mdelay(7);
1785		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1786			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1787			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1788		}
1789		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1790			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1791			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1792		}
1793		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1794			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1795		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1796			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1797	}
1798	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1799		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1800	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1801			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1802
1803	/* Suspend hub */
1804	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1805	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1806	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1807	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1808
1809#if 1
1810	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1811	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1812		pr_err("%s: controller won't suspend %08x\n", __func__,
1813		    isp1362_hcd->hc_control);
1814		status = -EBUSY;
1815	} else
1816#endif
1817	{
1818		/* no resumes until devices finish suspending */
1819		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1820	}
1821done:
1822	if (status == 0) {
1823		hcd->state = HC_STATE_SUSPENDED;
1824		DBG(0, "%s: HCD suspended: %08x\n", __func__,
1825		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1826	}
1827	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1828	return status;
1829}
1830
1831static int isp1362_bus_resume(struct usb_hcd *hcd)
1832{
1833	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1834	u32 port;
1835	unsigned long flags;
1836	int status = -EINPROGRESS;
1837
1838	if (time_before(jiffies, isp1362_hcd->next_statechange))
1839		msleep(5);
1840
1841	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1842	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1843	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1844	if (hcd->state == HC_STATE_RESUMING) {
1845		pr_warning("%s: duplicate resume\n", __func__);
1846		status = 0;
1847	} else
1848		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1849		case OHCI_USB_SUSPEND:
1850			DBG(0, "%s: resume root hub\n", __func__);
1851			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1852			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1853			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1854			break;
1855		case OHCI_USB_RESUME:
1856			/* HCFS changes sometime after INTR_RD */
1857			DBG(0, "%s: remote wakeup\n", __func__);
1858			break;
1859		case OHCI_USB_OPER:
1860			DBG(0, "%s: odd resume\n", __func__);
1861			status = 0;
1862			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1863			break;
1864		default:		/* RESET, we lost power */
1865			DBG(0, "%s: root hub hardware reset\n", __func__);
1866			status = -EBUSY;
1867		}
1868	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1869	if (status == -EBUSY) {
1870		DBG(0, "%s: Restarting HC\n", __func__);
1871		isp1362_hc_stop(hcd);
1872		return isp1362_hc_start(hcd);
1873	}
1874	if (status != -EINPROGRESS)
1875		return status;
1876	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1877	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1878	while (port--) {
1879		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1880
1881		/* force global, not selective, resume */
1882		if (!(stat & RH_PS_PSS)) {
1883			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1884			continue;
1885		}
1886		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1887		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1888	}
1889	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1890
1891	/* Some controllers (lucent) need extra-long delays */
1892	hcd->state = HC_STATE_RESUMING;
1893	mdelay(20 /* usb 11.5.1.10 */ + 15);
1894
1895	isp1362_hcd->hc_control = OHCI_USB_OPER;
1896	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1897	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1898	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1899	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1900	/* TRSMRCY */
1901	msleep(10);
1902
1903	/* keep it alive for ~5x suspend + resume costs */
1904	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1905
1906	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1907	hcd->state = HC_STATE_RUNNING;
1908	return 0;
1909}
1910#else
1911#define	isp1362_bus_suspend	NULL
1912#define	isp1362_bus_resume	NULL
1913#endif
1914
1915/*-------------------------------------------------------------------------*/
1916
1917#ifdef STUB_DEBUG_FILE
1918
1919static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
1920{
1921}
1922static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
1923{
1924}
1925
1926#else
1927
1928#include <linux/proc_fs.h>
1929#include <linux/seq_file.h>
1930
1931static void dump_irq(struct seq_file *s, char *label, u16 mask)
1932{
1933	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1934		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1935		   mask & HCuPINT_SUSP ? " susp" : "",
1936		   mask & HCuPINT_OPR ? " opr" : "",
1937		   mask & HCuPINT_EOT ? " eot" : "",
1938		   mask & HCuPINT_ATL ? " atl" : "",
1939		   mask & HCuPINT_SOF ? " sof" : "");
1940}
1941
1942static void dump_int(struct seq_file *s, char *label, u32 mask)
1943{
1944	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1945		   mask & OHCI_INTR_MIE ? " MIE" : "",
1946		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
1947		   mask & OHCI_INTR_FNO ? " fno" : "",
1948		   mask & OHCI_INTR_UE ? " ue" : "",
1949		   mask & OHCI_INTR_RD ? " rd" : "",
1950		   mask & OHCI_INTR_SF ? " sof" : "",
1951		   mask & OHCI_INTR_SO ? " so" : "");
1952}
1953
1954static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1955{
1956	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1957		   mask & OHCI_CTRL_RWC ? " rwc" : "",
1958		   mask & OHCI_CTRL_RWE ? " rwe" : "",
1959		   ({
1960			   char *hcfs;
1961			   switch (mask & OHCI_CTRL_HCFS) {
1962			   case OHCI_USB_OPER:
1963				   hcfs = " oper";
1964				   break;
1965			   case OHCI_USB_RESET:
1966				   hcfs = " reset";
1967				   break;
1968			   case OHCI_USB_RESUME:
1969				   hcfs = " resume";
1970				   break;
1971			   case OHCI_USB_SUSPEND:
1972				   hcfs = " suspend";
1973				   break;
1974			   default:
1975				   hcfs = " ?";
1976			   }
1977			   hcfs;
1978		   }));
1979}
1980
1981static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1982{
1983	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1984		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
1985	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1986		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1987	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1988		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1989	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1990		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1991	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1992		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
1993	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1994		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1995	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1996		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
1997	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
1998		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
1999	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
2000		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
2001	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
2002		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
2003	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
2004		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
2005	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
2006		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
2007	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
2008		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
2009	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
2010		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
2011	seq_printf(s, "\n");
2012	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
2013		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2014	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2015		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2016	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2017		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2018	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2019		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
2020	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2021		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2022	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2023		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2024	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2025		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2026	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2027		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2028	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2029		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2030#if 0
2031	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
2032		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2033#endif
2034	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2035		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2036	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2037		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2038	seq_printf(s, "\n");
2039	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2040		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2041	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2042		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2043	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2044		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2045	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2046		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2047	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2048		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2049	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2050		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2051	seq_printf(s, "\n");
2052	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2053		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2054	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2055		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2056#if 0
2057	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2058		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2059#endif
2060	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2061		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2062	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2063		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2064	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2065		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2066	seq_printf(s, "\n");
2067	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2068		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2069	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2070		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2071}
2072
2073static int proc_isp1362_show(struct seq_file *s, void *unused)
2074{
2075	struct isp1362_hcd *isp1362_hcd = s->private;
2076	struct isp1362_ep *ep;
2077	int i;
2078
2079	seq_printf(s, "%s\n%s version %s\n",
2080		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2081
2082	/* collect statistics to help estimate potential win for
2083	 * DMA engines that care about alignment (PXA)
2084	 */
2085	seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2086		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2087		   isp1362_hcd->stat2, isp1362_hcd->stat1);
2088	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2089	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2090	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2091		   max(isp1362_hcd->istl_queue[0].stat_maxptds,
2092		       isp1362_hcd->istl_queue[1].stat_maxptds));
2093
2094	/* FIXME: don't show the following in suspended state */
2095	spin_lock_irq(&isp1362_hcd->lock);
2096
2097	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2098	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2099	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2100	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2101	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2102
2103	for (i = 0; i < NUM_ISP1362_IRQS; i++)
2104		if (isp1362_hcd->irq_stat[i])
2105			seq_printf(s, "%-15s: %d\n",
2106				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2107
2108	dump_regs(s, isp1362_hcd);
2109	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2110		struct urb *urb;
2111
2112		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2113			   ({
2114				   char *s;
2115				   switch (ep->nextpid) {
2116				   case USB_PID_IN:
2117					   s = "in";
2118					   break;
2119				   case USB_PID_OUT:
2120					   s = "out";
2121					   break;
2122				   case USB_PID_SETUP:
2123					   s = "setup";
2124					   break;
2125				   case USB_PID_ACK:
2126					   s = "status";
2127					   break;
2128				   default:
2129					   s = "?";
2130					   break;
2131				   };
2132				   s;}), ep->maxpacket);
2133		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2134			seq_printf(s, "  urb%p, %d/%d\n", urb,
2135				   urb->actual_length,
2136				   urb->transfer_buffer_length);
2137		}
2138	}
2139	if (!list_empty(&isp1362_hcd->async))
2140		seq_printf(s, "\n");
2141	dump_ptd_queue(&isp1362_hcd->atl_queue);
2142
2143	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2144
2145	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2146		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2147			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2148
2149		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2150			   ep->interval, ep,
2151			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2152			   ep->udev->devnum, ep->epnum,
2153			   (ep->epnum == 0) ? "" :
2154			   ((ep->nextpid == USB_PID_IN) ?
2155			    "in" : "out"), ep->maxpacket);
2156	}
2157	dump_ptd_queue(&isp1362_hcd->intl_queue);
2158
2159	seq_printf(s, "ISO:\n");
2160
2161	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2162		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2163			   ep->interval, ep,
2164			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2165			   ep->udev->devnum, ep->epnum,
2166			   (ep->epnum == 0) ? "" :
2167			   ((ep->nextpid == USB_PID_IN) ?
2168			    "in" : "out"), ep->maxpacket);
2169	}
2170
2171	spin_unlock_irq(&isp1362_hcd->lock);
2172	seq_printf(s, "\n");
2173
2174	return 0;
2175}
2176
2177static int proc_isp1362_open(struct inode *inode, struct file *file)
2178{
2179	return single_open(file, proc_isp1362_show, PDE(inode)->data);
2180}
2181
2182static const struct file_operations proc_ops = {
2183	.open = proc_isp1362_open,
2184	.read = seq_read,
2185	.llseek = seq_lseek,
2186	.release = single_release,
2187};
2188
2189/* expect just one isp1362_hcd per system */
2190static const char proc_filename[] = "driver/isp1362";
2191
2192static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2193{
2194	struct proc_dir_entry *pde;
2195
2196	pde = create_proc_entry(proc_filename, 0, NULL);
2197	if (pde == NULL) {
2198		pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2199		return;
2200	}
2201
2202	pde->proc_fops = &proc_ops;
2203	pde->data = isp1362_hcd;
2204	isp1362_hcd->pde = pde;
2205}
2206
2207static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2208{
2209	if (isp1362_hcd->pde)
2210		remove_proc_entry(proc_filename, NULL);
2211}
2212
2213#endif
2214
2215/*-------------------------------------------------------------------------*/
2216
2217static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2218{
2219	int tmp = 20;
2220
2221	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2222	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2223	while (--tmp) {
2224		mdelay(1);
2225		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2226			break;
2227	}
2228	if (!tmp)
2229		pr_err("Software reset timeout\n");
2230}
2231
2232static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2233{
2234	unsigned long flags;
2235
2236	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2237	__isp1362_sw_reset(isp1362_hcd);
2238	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2239}
2240
2241static int isp1362_mem_config(struct usb_hcd *hcd)
2242{
2243	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2244	unsigned long flags;
2245	u32 total;
2246	u16 istl_size = ISP1362_ISTL_BUFSIZE;
2247	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2248	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2249	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2250	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2251	u16 atl_size;
2252	int i;
2253
2254	WARN_ON(istl_size & 3);
2255	WARN_ON(atl_blksize & 3);
2256	WARN_ON(intl_blksize & 3);
2257	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2258	WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2259
2260	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2261	if (atl_buffers > 32)
2262		atl_buffers = 32;
2263	atl_size = atl_buffers * atl_blksize;
2264	total = atl_size + intl_size + istl_size;
2265	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2266	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2267		 istl_size / 2, istl_size, 0, istl_size / 2);
2268	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2269		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2270		 intl_size, istl_size);
2271	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2272		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2273		 atl_size, istl_size + intl_size);
2274	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2275		 ISP1362_BUF_SIZE - total);
2276
2277	if (total > ISP1362_BUF_SIZE) {
2278		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2279			__func__, total, ISP1362_BUF_SIZE);
2280		return -ENOMEM;
2281	}
2282
2283	total = istl_size + intl_size + atl_size;
2284	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2285
2286	for (i = 0; i < 2; i++) {
2287		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2288		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2289		isp1362_hcd->istl_queue[i].blk_size = 4;
2290		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2291		snprintf(isp1362_hcd->istl_queue[i].name,
2292			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2293		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2294		     isp1362_hcd->istl_queue[i].name,
2295		     isp1362_hcd->istl_queue[i].buf_start,
2296		     isp1362_hcd->istl_queue[i].buf_size);
2297	}
2298	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2299
2300	isp1362_hcd->intl_queue.buf_start = istl_size;
2301	isp1362_hcd->intl_queue.buf_size = intl_size;
2302	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2303	isp1362_hcd->intl_queue.blk_size = intl_blksize;
2304	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2305	isp1362_hcd->intl_queue.skip_map = ~0;
2306	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2307
2308	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2309			    isp1362_hcd->intl_queue.buf_size);
2310	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2311			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2312	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2313	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2314			    1 << (ISP1362_INTL_BUFFERS - 1));
2315
2316	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2317	isp1362_hcd->atl_queue.buf_size = atl_size;
2318	isp1362_hcd->atl_queue.buf_count = atl_buffers;
2319	isp1362_hcd->atl_queue.blk_size = atl_blksize;
2320	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2321	isp1362_hcd->atl_queue.skip_map = ~0;
2322	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2323
2324	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2325			    isp1362_hcd->atl_queue.buf_size);
2326	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2327			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2328	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2329	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2330			    1 << (atl_buffers - 1));
2331
2332	snprintf(isp1362_hcd->atl_queue.name,
2333		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2334	snprintf(isp1362_hcd->intl_queue.name,
2335		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2336	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2337	     isp1362_hcd->intl_queue.name,
2338	     isp1362_hcd->intl_queue.buf_start,
2339	     ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2340	     isp1362_hcd->intl_queue.buf_size);
2341	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2342	     isp1362_hcd->atl_queue.name,
2343	     isp1362_hcd->atl_queue.buf_start,
2344	     atl_buffers, isp1362_hcd->atl_queue.blk_size,
2345	     isp1362_hcd->atl_queue.buf_size);
2346
2347	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2348
2349	return 0;
2350}
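
To make the buffer partitioning above concrete, here is a worked example with illustrative configuration values; the real ISP1362_ISTL_BUFSIZE, ISP1362_INTL_BLKSIZE, ISP1362_INTL_BUFFERS and ISP1362_ATL_BLKSIZE defaults live in isp1362.h and may differ, only the 8-byte PTD header and the arithmetic itself are taken from the code above:

/*
 * Assume ISP1362_BUF_SIZE = 4096, ISP1362_ISTL_BUFSIZE = 512,
 * ISP1362_INTL_BLKSIZE = 64, ISP1362_INTL_BUFFERS = 16,
 * ISP1362_ATL_BLKSIZE = 64 and PTD_HEADER_SIZE = 8.
 *
 *   istl_size    = 512                  (two 256-byte ISO banks)
 *   intl_blksize = 64 + 8  =   72
 *   intl_size    = 16 * 72 = 1152
 *   atl_blksize  = 64 + 8  =   72
 *   atl_buffers  = (4096 - (512 + 1152)) / 72 = 33, capped to 32
 *   atl_size     = 32 * 72 = 2304
 *   total        = 512 + 1152 + 2304 = 3968   (128 bytes spare)
 *
 * which yields the layout ISTL0 @ $0000, ISTL1 @ $0100,
 * INTL @ $0200, ATL @ $0680.
 */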
2351
2352static int isp1362_hc_reset(struct usb_hcd *hcd)
2353{
2354	int ret = 0;
2355	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2356	unsigned long t;
2357	unsigned long timeout = 100;
2358	unsigned long flags;
2359	int clkrdy = 0;
2360
2361	pr_info("%s:\n", __func__);
2362
2363	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2364		isp1362_hcd->board->reset(hcd->self.controller, 1);
2365		msleep(20);
2366		if (isp1362_hcd->board->clock)
2367			isp1362_hcd->board->clock(hcd->self.controller, 1);
2368		isp1362_hcd->board->reset(hcd->self.controller, 0);
2369	} else
2370		isp1362_sw_reset(isp1362_hcd);
2371
2372	/* chip has been reset. First we need to see a clock */
2373	t = jiffies + msecs_to_jiffies(timeout);
2374	while (!clkrdy && time_before_eq(jiffies, t)) {
2375		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2376		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2377		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2378		if (!clkrdy)
2379			msleep(4);
2380	}
2381
2382	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2383	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2384	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2385	if (!clkrdy) {
2386		pr_err("Clock not ready after %lums\n", timeout);
2387		ret = -ENODEV;
2388	}
2389	return ret;
2390}
2391
2392static void isp1362_hc_stop(struct usb_hcd *hcd)
2393{
2394	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2395	unsigned long flags;
2396	u32 tmp;
2397
2398	pr_info("%s:\n", __func__);
2399
2400	del_timer_sync(&hcd->rh_timer);
2401
2402	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2403
2404	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2405
2406	/* Switch off power for all ports */
2407	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2408	tmp &= ~(RH_A_NPS | RH_A_PSM);
2409	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2410	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2411
2412	/* Reset the chip */
2413	if (isp1362_hcd->board && isp1362_hcd->board->reset)
2414		isp1362_hcd->board->reset(hcd->self.controller, 1);
2415	else
2416		__isp1362_sw_reset(isp1362_hcd);
2417
2418	if (isp1362_hcd->board && isp1362_hcd->board->clock)
2419		isp1362_hcd->board->clock(hcd->self.controller, 0);
2420
2421	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2422}
2423
2424#ifdef CHIP_BUFFER_TEST
2425static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2426{
2427	int ret = 0;
2428	u16 *ref;
2429	unsigned long flags;
2430
2431	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2432	if (ref) {
2433		int offset;
2434		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2435
2436		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2437			ref[offset] = ~offset;
2438			tst[offset] = offset;
2439		}
2440
2441		for (offset = 0; offset < 4; offset++) {
2442			int j;
2443
2444			for (j = 0; j < 8; j++) {
2445				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2446				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2447				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2448				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2449
2450				if (memcmp(ref, tst, j)) {
2451					ret = -ENODEV;
2452					pr_err("%s: memory check with %d byte offset %d failed\n",
2453					    __func__, j, offset);
2454					dump_data((u8 *)ref + offset, j);
2455					dump_data((u8 *)tst + offset, j);
2456				}
2457			}
2458		}
2459
2460		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2461		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2462		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2463		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2464
2465		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2466			ret = -ENODEV;
2467			pr_err("%s: memory check failed\n", __func__);
2468			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2469		}
2470
2471		for (offset = 0; offset < 256; offset++) {
2472			int test_size = 0;
2473
2474			yield();
2475
2476			memset(tst, 0, ISP1362_BUF_SIZE);
2477			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2478			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2479			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2480			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2481			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2482				   ISP1362_BUF_SIZE / 2)) {
2483				pr_err("%s: Failed to clear buffer\n", __func__);
2484				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2485				break;
2486			}
2487			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2488			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2489			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2490					     offset * 2 + PTD_HEADER_SIZE, test_size);
2491			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2492					    PTD_HEADER_SIZE + test_size);
2493			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2494			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2495				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2496				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2497				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2498				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2499						    PTD_HEADER_SIZE + test_size);
2500				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2501				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2502					ret = -ENODEV;
2503					pr_err("%s: memory check with offset %02x failed\n",
2504					    __func__, offset);
2505					break;
2506				}
2507				pr_warning("%s: memory check with offset %02x ok after second read\n",
2508				     __func__, offset);
2509			}
2510		}
2511		kfree(ref);
2512	}
2513	return ret;
2514}
2515#endif
2516
2517static int isp1362_hc_start(struct usb_hcd *hcd)
2518{
2519	int ret;
2520	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2521	struct isp1362_platform_data *board = isp1362_hcd->board;
2522	u16 hwcfg;
2523	u16 chipid;
2524	unsigned long flags;
2525
2526	pr_info("%s:\n", __func__);
2527
2528	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2529	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2530	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2531
2532	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2533		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2534		return -ENODEV;
2535	}
2536
2537#ifdef CHIP_BUFFER_TEST
2538	ret = isp1362_chip_test(isp1362_hcd);
2539	if (ret)
2540		return -ENODEV;
2541#endif
2542	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2543	/* clear interrupt status and disable all interrupt sources */
2544	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2545	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2546
2547	/* HW conf */
2548	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2549	if (board->sel15Kres)
2550		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2551			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2552	if (board->clknotstop)
2553		hwcfg |= HCHWCFG_CLKNOTSTOP;
2554	if (board->oc_enable)
2555		hwcfg |= HCHWCFG_ANALOG_OC;
2556	if (board->int_act_high)
2557		hwcfg |= HCHWCFG_INT_POL;
2558	if (board->int_edge_triggered)
2559		hwcfg |= HCHWCFG_INT_TRIGGER;
2560	if (board->dreq_act_high)
2561		hwcfg |= HCHWCFG_DREQ_POL;
2562	if (board->dack_act_high)
2563		hwcfg |= HCHWCFG_DACK_POL;
2564	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2565	isp1362_show_reg(isp1362_hcd, HCHWCFG);
2566	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2567	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2568
2569	ret = isp1362_mem_config(hcd);
2570	if (ret)
2571		return ret;
2572
2573	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2574
2575	/* Root hub conf */
2576	isp1362_hcd->rhdesca = 0;
2577	if (board->no_power_switching)
2578		isp1362_hcd->rhdesca |= RH_A_NPS;
2579	if (board->power_switching_mode)
2580		isp1362_hcd->rhdesca |= RH_A_PSM;
2581	if (board->potpg)
2582		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2583	else
2584		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2585
2586	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2587	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2588	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2589
2590	isp1362_hcd->rhdescb = RH_B_PPCM;
2591	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2592	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2593
2594	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2595	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2596	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2597
2598	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2599
2600	isp1362_hcd->hc_control = OHCI_USB_OPER;
2601	hcd->state = HC_STATE_RUNNING;
2602
2603	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2604	/* Set up interrupts */
2605	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2606	isp1362_hcd->intenb |= OHCI_INTR_RD;
2607	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2608	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2609	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2610
2611	/* Go operational */
2612	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2613	/* enable global power */
2614	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2615
2616	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2617
2618	return 0;
2619}
2620
2621/*-------------------------------------------------------------------------*/
2622
2623static struct hc_driver isp1362_hc_driver = {
2624	.description =		hcd_name,
2625	.product_desc =		"ISP1362 Host Controller",
2626	.hcd_priv_size =	sizeof(struct isp1362_hcd),
2627
2628	.irq =			isp1362_irq,
2629	.flags =		HCD_USB11 | HCD_MEMORY,
2630
2631	.reset =		isp1362_hc_reset,
2632	.start =		isp1362_hc_start,
2633	.stop =			isp1362_hc_stop,
2634
2635	.urb_enqueue =		isp1362_urb_enqueue,
2636	.urb_dequeue =		isp1362_urb_dequeue,
2637	.endpoint_disable =	isp1362_endpoint_disable,
2638
2639	.get_frame_number =	isp1362_get_frame,
2640
2641	.hub_status_data =	isp1362_hub_status_data,
2642	.hub_control =		isp1362_hub_control,
2643	.bus_suspend =		isp1362_bus_suspend,
2644	.bus_resume =		isp1362_bus_resume,
2645};
2646
2647/*-------------------------------------------------------------------------*/
2648
2649static int __devexit isp1362_remove(struct platform_device *pdev)
2650{
2651	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2652	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2653	struct resource *res;
2654
2655	remove_debug_file(isp1362_hcd);
2656	DBG(0, "%s: Removing HCD\n", __func__);
2657	usb_remove_hcd(hcd);
2658
2659	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2660	    isp1362_hcd->data_reg);
2661	iounmap(isp1362_hcd->data_reg);
2662
2663	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2664	    isp1362_hcd->addr_reg);
2665	iounmap(isp1362_hcd->addr_reg);
2666
2667	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2668	if (res) {
2669		DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2670		release_mem_region(res->start, resource_size(res));
	}
2671
2672	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2673	if (res) {
2674		DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2675		release_mem_region(res->start, resource_size(res));
	}
2676
2677	DBG(0, "%s: put_hcd\n", __func__);
2678	usb_put_hcd(hcd);
2679	DBG(0, "%s: Done\n", __func__);
2680
2681	return 0;
2682}
2683
2684static int __devinit isp1362_probe(struct platform_device *pdev)
2685{
2686	struct usb_hcd *hcd;
2687	struct isp1362_hcd *isp1362_hcd;
2688	struct resource *addr, *data;
2689	void __iomem *addr_reg;
2690	void __iomem *data_reg;
2691	int irq;
2692	int retval = 0;
2693	struct resource *irq_res;
2694	unsigned int irq_flags = 0;
2695
2696	/* basic sanity checks first.  board-specific init logic should
2697	 * have initialized these three resources and probably the board-
2698	 * specific platform_data.  we don't probe for IRQs, and do only
2699	 * minimal sanity checking.
2700	 */
2701	if (pdev->num_resources < 3) {
2702		retval = -ENODEV;
2703		goto err1;
2704	}
2705
2706	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2707	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2708	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2709	if (!addr || !data || !irq_res) {
2710		retval = -ENODEV;
2711		goto err1;
2712	}
2713	irq = irq_res->start;
2714
2715	if (pdev->dev.dma_mask) {
2716		DBG(1, "won't do DMA");
2717		retval = -ENODEV;
2718		goto err1;
2719	}
2720
2721	if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
2722		retval = -EBUSY;
2723		goto err1;
2724	}
2725	addr_reg = ioremap(addr->start, resource_size(addr));
2726	if (addr_reg == NULL) {
2727		retval = -ENOMEM;
2728		goto err2;
2729	}
2730
2731	if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
2732		retval = -EBUSY;
2733		goto err3;
2734	}
2735	data_reg = ioremap(data->start, resource_size(data));
2736	if (data_reg == NULL) {
2737		retval = -ENOMEM;
2738		goto err4;
2739	}
2740
2741	/* allocate and initialize hcd */
2742	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2743	if (!hcd) {
2744		retval = -ENOMEM;
2745		goto err5;
2746	}
2747	hcd->rsrc_start = data->start;
2748	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2749	isp1362_hcd->data_reg = data_reg;
2750	isp1362_hcd->addr_reg = addr_reg;
2751
2752	isp1362_hcd->next_statechange = jiffies;
2753	spin_lock_init(&isp1362_hcd->lock);
2754	INIT_LIST_HEAD(&isp1362_hcd->async);
2755	INIT_LIST_HEAD(&isp1362_hcd->periodic);
2756	INIT_LIST_HEAD(&isp1362_hcd->isoc);
2757	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2758	isp1362_hcd->board = pdev->dev.platform_data;
2759#if USE_PLATFORM_DELAY
2760	if (!isp1362_hcd->board->delay) {
2761		dev_err(hcd->self.controller, "No platform delay function given\n");
2762		retval = -ENODEV;
2763		goto err6;
2764	}
2765#endif
2766
2767	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2768		irq_flags |= IRQF_TRIGGER_RISING;
2769	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2770		irq_flags |= IRQF_TRIGGER_FALLING;
2771	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2772		irq_flags |= IRQF_TRIGGER_HIGH;
2773	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2774		irq_flags |= IRQF_TRIGGER_LOW;
2775
2776	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
2777	if (retval != 0)
2778		goto err6;
2779	pr_info("%s, irq %d\n", hcd->product_desc, irq);
2780
2781	create_debug_file(isp1362_hcd);
2782
2783	return 0;
2784
2785 err6:
2786	DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
2787	usb_put_hcd(hcd);
2788 err5:
2789	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
2790	iounmap(data_reg);
2791 err4:
2792	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
2793	release_mem_region(data->start, resource_size(data));
2794 err3:
2795	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
2796	iounmap(addr_reg);
2797 err2:
2798	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
2799	release_mem_region(addr->start, resource_size(addr));
2800 err1:
2801	pr_err("%s: init error, %d\n", __func__, retval);
2802
2803	return retval;
2804}
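
For context, a hedged sketch of the board-side setup this probe routine expects: two memory resources (the data register window at index 0, the address register window at index 1), one IRQ resource, and an isp1362_platform_data hanging off dev.platform_data. Apart from the "isp1362-hcd" driver name and the platform_data field names (all of which appear elsewhere in this file), every identifier, address and number below is hypothetical:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1362.h>

static struct resource example_isp1362_resources[] = {
	[0] = {				/* data register window */
		.start	= 0x10000000,	/* hypothetical chip-select address */
		.end	= 0x10000001,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {				/* address register window */
		.start	= 0x10000002,
		.end	= 0x10000003,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {
		.start	= 42,		/* hypothetical interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
	},
};

static struct isp1362_platform_data example_isp1362_data = {
	.sel15Kres	= 1,		/* use the internal 15k pull-downs */
	.clknotstop	= 0,
	.oc_enable	= 1,		/* analog over-current detection */
	.int_act_high	= 0,
	.int_edge_triggered = 0,
};

static struct platform_device example_isp1362_device = {
	.name		= "isp1362-hcd",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_isp1362_data,
	},
	.num_resources	= ARRAY_SIZE(example_isp1362_resources),
	.resource	= example_isp1362_resources,
};

Registering example_isp1362_device from the board init code (for instance with platform_device_register()) is what causes the probe above to run and bind against the "isp1362-hcd" platform driver.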
2805
2806#ifdef	CONFIG_PM
2807static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2808{
2809	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2810	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2811	unsigned long flags;
2812	int retval = 0;
2813
2814	DBG(0, "%s: Suspending device\n", __func__);
2815
2816	if (state.event == PM_EVENT_FREEZE) {
2817		DBG(0, "%s: Suspending root hub\n", __func__);
2818		retval = isp1362_bus_suspend(hcd);
2819	} else {
2820		DBG(0, "%s: Suspending RH ports\n", __func__);
2821		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2822		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2823		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2824	}
2825	if (retval == 0)
2826		pdev->dev.power.power_state = state;
2827	return retval;
2828}
2829
2830static int isp1362_resume(struct platform_device *pdev)
2831{
2832	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2833	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2834	unsigned long flags;
2835
2836	DBG(0, "%s: Resuming\n", __func__);
2837
2838	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2839		DBG(0, "%s: Resume RH ports\n", __func__);
2840		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2841		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2842		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2843		return 0;
2844	}
2845
2846	pdev->dev.power.power_state = PMSG_ON;
2847
2848	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2849}
2850#else
2851#define	isp1362_suspend	NULL
2852#define	isp1362_resume	NULL
2853#endif
2854
2855static struct platform_driver isp1362_driver = {
2856	.probe = isp1362_probe,
2857	.remove = __devexit_p(isp1362_remove),
2858
2859	.suspend = isp1362_suspend,
2860	.resume = isp1362_resume,
2861	.driver = {
2862		.name = (char *)hcd_name,
2863		.owner = THIS_MODULE,
2864	},
2865};
2866
2867/*-------------------------------------------------------------------------*/
2868
2869static int __init isp1362_init(void)
2870{
2871	if (usb_disabled())
2872		return -ENODEV;
2873	pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
2874	return platform_driver_register(&isp1362_driver);
2875}
2876module_init(isp1362_init);
2877
2878static void __exit isp1362_cleanup(void)
2879{
2880	platform_driver_unregister(&isp1362_driver);
2881}
2882module_exit(isp1362_cleanup);
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ISP1362 HCD (Host Controller Driver) for USB.
   4 *
   5 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
   6 *
   7 * Derived from the SL811 HCD, rewritten for ISP116x.
   8 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
   9 *
  10 * Portions:
  11 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
  12 * Copyright (C) 2004 David Brownell
  13 */
  14
  15/*
  16 * The ISP1362 chip requires a large delay (300ns and 462ns) between
  17 * accesses to the address and data register.
  18 * The following timing options exist:
  19 *
  20 * 1. Configure your memory controller to add such delays if it can (the best)
  21 * 2. Implement platform-specific delay function possibly
  22 *    combined with configuring the memory controller; see
  23 *    include/linux/usb_isp1362.h for more info.
  24 * 3. Use ndelay (easiest, poorest).
  25 *
  26 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
  27 * platform specific section of isp1362.h to select the appropriate variant.
  28 *
  29 * Also note that according to the Philips "ISP1362 Errata" document
  30 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
  31 * is reasserted (even with #CS deasserted) within 132ns after a
  32 * write cycle to any controller register. If the hardware doesn't
  33 * implement the recommended fix (gating the #WR with #CS) software
  34 * must ensure that no further write cycle (not necessarily to the chip!)
  35 * is issued by the CPU within this interval.
  36
  37 * For PXA25x this can be ensured by using VLIO with the maximum
  38 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
  39 */
  40
  41#undef ISP1362_DEBUG
  42
  43/*
  44 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
  45 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
  46 * requests are carried out in separate frames. This will delay any SETUP
  47 * packets until the start of the next frame so that this situation is
  48 * unlikely to occur (and makes usbtest happy running with a PXA255 target
  49 * device).
  50 */
  51#undef BUGGY_PXA2XX_UDC_USBTEST
  52
  53#undef PTD_TRACE
  54#undef URB_TRACE
  55#undef VERBOSE
  56#undef REGISTERS
  57
  58/* This enables a memory test on the ISP1362 chip memory to make sure the
  59 * chip access timing is correct.
  60 */
  61#undef CHIP_BUFFER_TEST
  62
  63#include <linux/module.h>
  64#include <linux/moduleparam.h>
  65#include <linux/kernel.h>
  66#include <linux/delay.h>
  67#include <linux/ioport.h>
  68#include <linux/sched.h>
  69#include <linux/slab.h>
  70#include <linux/errno.h>
  71#include <linux/list.h>
  72#include <linux/interrupt.h>
  73#include <linux/usb.h>
  74#include <linux/usb/isp1362.h>
  75#include <linux/usb/hcd.h>
  76#include <linux/platform_device.h>
  77#include <linux/pm.h>
  78#include <linux/io.h>
  79#include <linux/bitmap.h>
  80#include <linux/prefetch.h>
  81#include <linux/debugfs.h>
  82#include <linux/seq_file.h>
  83
  84#include <asm/irq.h>
  85#include <asm/byteorder.h>
  86#include <asm/unaligned.h>
  87
  88static int dbg_level;
  89#ifdef ISP1362_DEBUG
  90module_param(dbg_level, int, 0644);
  91#else
  92module_param(dbg_level, int, 0);
  93#endif
  94
  95#include "../core/usb.h"
  96#include "isp1362.h"
  97
  98
  99#define DRIVER_VERSION	"2005-04-04"
 100#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"
 101
 102MODULE_DESCRIPTION(DRIVER_DESC);
 103MODULE_LICENSE("GPL");
 104
 105static const char hcd_name[] = "isp1362-hcd";
 106
 107static void isp1362_hc_stop(struct usb_hcd *hcd);
 108static int isp1362_hc_start(struct usb_hcd *hcd);
 109
 110/*-------------------------------------------------------------------------*/
 111
 112/*
 113 * When called from the interrupt handler only isp1362_hcd->irqenb is modified,
 114 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINTENB upon
 115 * completion.
 116 * We don't need a 'disable' counterpart, since interrupts will be disabled
 117 * only by the interrupt handler.
 118 */
 119static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
 120{
 121	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
 122		return;
 123	if (mask & ~isp1362_hcd->irqenb)
 124		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
 125	isp1362_hcd->irqenb |= mask;
 126	if (isp1362_hcd->irq_active)
 127		return;
 128	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
 129}
 130
 131/*-------------------------------------------------------------------------*/
 132
 133static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
 134						     u16 offset)
 135{
 136	struct isp1362_ep_queue *epq = NULL;
 137
 138	if (offset < isp1362_hcd->istl_queue[1].buf_start)
 139		epq = &isp1362_hcd->istl_queue[0];
 140	else if (offset < isp1362_hcd->intl_queue.buf_start)
 141		epq = &isp1362_hcd->istl_queue[1];
 142	else if (offset < isp1362_hcd->atl_queue.buf_start)
 143		epq = &isp1362_hcd->intl_queue;
 144	else if (offset < isp1362_hcd->atl_queue.buf_start +
 145		   isp1362_hcd->atl_queue.buf_size)
 146		epq = &isp1362_hcd->atl_queue;
 147
 148	if (epq)
 149		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
 150	else
 151		pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
 152
 153	return epq;
 154}
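
A quick illustration of the offset-to-queue mapping above, using the same illustrative layout as the memory-configuration example earlier (ISTL0 at $0000, ISTL1 at $0100, INTL at $0200, ATL at $0680 with 2304 bytes):

/*
 * offset $00c0: < istl_queue[1].buf_start ($0100)        -> istl_queue[0]
 * offset $0140: < intl_queue.buf_start    ($0200)        -> istl_queue[1]
 * offset $0700: >= atl_queue.buf_start    ($0680) and
 *               <  $0680 + 2304 = $0f80                  -> atl_queue
 * offset $1000: beyond the ATL area                      -> NULL ("invalid PTD")
 */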
 155
 156static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 157{
 158	int offset;
 159
 160	if (index * epq->blk_size > epq->buf_size) {
 161		pr_warn("%s: Bad %s index %d(%d)\n",
 162			__func__, epq->name, index,
 163			epq->buf_size / epq->blk_size);
 164		return -EINVAL;
 165	}
 166	offset = epq->buf_start + index * epq->blk_size;
 167	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
 168
 169	return offset;
 170}
 171
 172/*-------------------------------------------------------------------------*/
 173
 174static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
 175				    int mps)
 176{
 177	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
 178
 179	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
 180	if (xfer_size < size && xfer_size % mps)
 181		xfer_size -= xfer_size % mps;
 182
 183	return xfer_size;
 184}
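
A worked example of the clamping above, with made-up numbers (blk_size = 72, i.e. 64 data bytes plus the 8-byte PTD header; buf_avail = 2; mps = 64; a 200-byte request):

/*
 *   xfer_size = min(MAX_XFER_SIZE, 200)       -> 200  (assuming MAX_XFER_SIZE >= 200)
 *   xfer_size = min(200, 2 * 72 - 8)          -> 136
 *   136 < 200 and 136 % 64 == 8, round down   -> 128
 */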
 185
 186static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
 187			     struct isp1362_ep *ep, u16 len)
 188{
 189	int ptd_offset = -EINVAL;
 190	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
 191	int found;
 192
 193	BUG_ON(len > epq->buf_size);
 194
 195	if (!epq->buf_avail)
 196		return -ENOMEM;
 197
 198	if (ep->num_ptds)
 199		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
 200		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
 201	BUG_ON(ep->num_ptds != 0);
 202
 203	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
 204						num_ptds, 0);
 205	if (found >= epq->buf_count)
 206		return -EOVERFLOW;
 207
 208	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
 209	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
 210	ptd_offset = get_ptd_offset(epq, found);
 211	WARN_ON(ptd_offset < 0);
 212	ep->ptd_offset = ptd_offset;
 213	ep->num_ptds += num_ptds;
 214	epq->buf_avail -= num_ptds;
 215	BUG_ON(epq->buf_avail > epq->buf_count);
 216	ep->ptd_index = found;
 217	bitmap_set(&epq->buf_map, found, num_ptds);
 218	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
 219	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
 220	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
 221
 222	return found;
 223}
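
For illustration, the block accounting above with made-up numbers (blk_size = 72, PTD_HEADER_SIZE = 8, len = 128):

/*
 *   num_ptds = ((128 + 8 - 1) / 72) + 1 = 2
 *   bitmap_find_next_zero_area() then has to find two consecutive
 *   clear bits in buf_map; the claimed 2 * 72 = 144 bytes hold the
 *   8-byte PTD header plus the 128-byte payload.
 */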
 224
 225static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 226{
 227	int last = ep->ptd_index + ep->num_ptds;
 228
 229	if (last > epq->buf_count)
 230		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
 231		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
 232		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
 233		    epq->buf_map, epq->skip_map);
 234	BUG_ON(last > epq->buf_count);
 235
 236	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
 237	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
 238	epq->buf_avail += ep->num_ptds;
 239	epq->ptd_count--;
 240
 241	BUG_ON(epq->buf_avail > epq->buf_count);
 242	BUG_ON(epq->ptd_count > epq->buf_count);
 243
 244	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
 245	    __func__, epq->name,
 246	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
 247	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
 248	    epq->buf_map, epq->skip_map);
 249
 250	ep->num_ptds = 0;
 251	ep->ptd_offset = -EINVAL;
 252	ep->ptd_index = -EINVAL;
 253}
 254
 255/*-------------------------------------------------------------------------*/
 256
 257/*
 258  Set up PTDs.
 259*/
 260static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 261			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
 262			u16 fno)
 263{
 264	struct ptd *ptd;
 265	int toggle;
 266	int dir;
 267	u16 len;
 268	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
 269
 270	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
 271
 272	ptd = &ep->ptd;
 273
 274	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
 275
 276	switch (ep->nextpid) {
 277	case USB_PID_IN:
 278		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
 279		dir = PTD_DIR_IN;
 280		if (usb_pipecontrol(urb->pipe)) {
 281			len = min_t(size_t, ep->maxpacket, buf_len);
 282		} else if (usb_pipeisoc(urb->pipe)) {
 283			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
 284			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
 285		} else
 286			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 287		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 288		    (int)buf_len);
 289		break;
 290	case USB_PID_OUT:
 291		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
 292		dir = PTD_DIR_OUT;
 293		if (usb_pipecontrol(urb->pipe))
 294			len = min_t(size_t, ep->maxpacket, buf_len);
 295		else if (usb_pipeisoc(urb->pipe))
 296			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
 297		else
 298			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 299		if (len == 0)
 300			pr_info("%s: Sending ZERO packet: %d\n", __func__,
 301			     urb->transfer_flags & URB_ZERO_PACKET);
 302		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 303		    (int)buf_len);
 304		break;
 305	case USB_PID_SETUP:
 306		toggle = 0;
 307		dir = PTD_DIR_SETUP;
 308		len = sizeof(struct usb_ctrlrequest);
 309		DBG(1, "%s: SETUP len %d\n", __func__, len);
 310		ep->data = urb->setup_packet;
 311		break;
 312	case USB_PID_ACK:
 313		toggle = 1;
 314		len = 0;
 315		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
 316			PTD_DIR_OUT : PTD_DIR_IN;
 317		DBG(1, "%s: ACK   len %d\n", __func__, len);
 318		break;
 319	default:
 320		toggle = dir = len = 0;
 321		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
 322		BUG_ON(1);
 323	}
 324
 325	ep->length = len;
 326	if (!len)
 327		ep->data = NULL;
 328
 329	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
 330	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
 331		PTD_EP(ep->epnum);
 332	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
 333	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
 334
 335	if (usb_pipeint(urb->pipe)) {
 336		ptd->faddr |= PTD_SF_INT(ep->branch);
 337		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
 338	}
 339	if (usb_pipeisoc(urb->pipe))
 340		ptd->faddr |= PTD_SF_ISO(fno);
 341
 342	DBG(1, "%s: Finished\n", __func__);
 343}
 344
 345static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 346			      struct isp1362_ep_queue *epq)
 347{
 348	struct ptd *ptd = &ep->ptd;
 349	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
 350
 351	prefetch(ptd);
 352	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 353	if (len)
 354		isp1362_write_buffer(isp1362_hcd, ep->data,
 355				     ep->ptd_offset + PTD_HEADER_SIZE, len);
 356
 357	dump_ptd(ptd);
 358	dump_ptd_out_data(ptd, ep->data);
 359}
 360
 361static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 362			     struct isp1362_ep_queue *epq)
 363{
 364	struct ptd *ptd = &ep->ptd;
 365	int act_len;
 366
 367	WARN_ON(list_empty(&ep->active));
 368	BUG_ON(ep->ptd_offset < 0);
 369
 370	list_del_init(&ep->active);
 371	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
 372
 373	prefetchw(ptd);
 374	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 375	dump_ptd(ptd);
 376	act_len = PTD_GET_COUNT(ptd);
 377	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
 378		return;
 379	if (act_len > ep->length)
 380		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
 381			 ep->ptd_offset, act_len, ep->length);
 382	BUG_ON(act_len > ep->length);
 383	/* Only transfer the amount of data that has actually been overwritten
 384	 * in the chip buffer. We don't want any data that doesn't belong to the
 385	 * transfer to leak out of the chip to the caller's transfer buffer!
 386	 */
 387	prefetchw(ep->data);
 388	isp1362_read_buffer(isp1362_hcd, ep->data,
 389			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
 390	dump_ptd_in_data(ptd, ep->data);
 391}
 392
 393/*
 394 * INT PTDs will stay in the chip until data is available.
 395 * This function will remove a PTD from the chip when the URB is dequeued.
 396 * Must be called with the spinlock held and IRQs disabled
 397 */
 398static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 399
 400{
 401	int index;
 402	struct isp1362_ep_queue *epq;
 403
 404	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
 405	BUG_ON(ep->ptd_offset < 0);
 406
 407	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 408	BUG_ON(!epq);
 409
 410	/* put ep in remove_list for cleanup */
 411	WARN_ON(!list_empty(&ep->remove_list));
 412	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
 413	/* let SOF interrupt handle the cleanup */
 414	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 415
 416	index = ep->ptd_index;
 417	if (index < 0)
 418		/* ISO queues don't have SKIP registers */
 419		return;
 420
 421	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
 422	    index, ep->ptd_offset, epq->skip_map, 1 << index);
 423
 424	/* prevent further processing of PTD (will be effective after next SOF) */
 425	epq->skip_map |= 1 << index;
 426	if (epq == &isp1362_hcd->atl_queue) {
 427		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
 428		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
 429		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
 430		if (~epq->skip_map == 0)
 431			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 432	} else if (epq == &isp1362_hcd->intl_queue) {
 433		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
 434		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
 435		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
 436		if (~epq->skip_map == 0)
 437			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 438	}
 439}
 440
 441/*
 442 * Take done or failed requests out of the schedule. Give back
 443 * processed URBs.
 444 */
 445static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 446			   struct urb *urb, int status)
 447     __releases(isp1362_hcd->lock)
 448     __acquires(isp1362_hcd->lock)
 449{
 450	urb->hcpriv = NULL;
 451	ep->error_count = 0;
 452
 453	if (usb_pipecontrol(urb->pipe))
 454		ep->nextpid = USB_PID_SETUP;
 455
 456	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
 457		ep->num_req, usb_pipedevice(urb->pipe),
 458		usb_pipeendpoint(urb->pipe),
 459		!usb_pipein(urb->pipe) ? "out" : "in",
 460		usb_pipecontrol(urb->pipe) ? "ctrl" :
 461			usb_pipeint(urb->pipe) ? "int" :
 462			usb_pipebulk(urb->pipe) ? "bulk" :
 463			"iso",
 464		urb->actual_length, urb->transfer_buffer_length,
 465		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
 466		"short_ok" : "", urb->status);
 467
 468
 469	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
 470	spin_unlock(&isp1362_hcd->lock);
 471	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
 472	spin_lock(&isp1362_hcd->lock);
 473
 474	/* take idle endpoints out of the schedule right away */
 475	if (!list_empty(&ep->hep->urb_list))
 476		return;
 477
 478	/* async deschedule */
 479	if (!list_empty(&ep->schedule)) {
 480		list_del_init(&ep->schedule);
 481		return;
 482	}
 483
 484
 485	if (ep->interval) {
 486		/* periodic deschedule */
 487		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
 488		    ep, ep->branch, ep->load,
 489		    isp1362_hcd->load[ep->branch],
 490		    isp1362_hcd->load[ep->branch] - ep->load);
 491		isp1362_hcd->load[ep->branch] -= ep->load;
 492		ep->branch = PERIODIC_SIZE;
 493	}
 494}
 495
 496/*
 497 * Analyze transfer results, handle partial transfers and errors.
 498 */
 499static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 500{
 501	struct urb *urb = get_urb(ep);
 502	struct usb_device *udev;
 503	struct ptd *ptd;
 504	int short_ok;
 505	u16 len;
 506	int urbstat = -EINPROGRESS;
 507	u8 cc;
 508
 509	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
 510
 511	udev = urb->dev;
 512	ptd = &ep->ptd;
 513	cc = PTD_GET_CC(ptd);
 514	if (cc == PTD_NOTACCESSED) {
 515		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
 516		    ep->num_req, ptd);
 517		cc = PTD_DEVNOTRESP;
 518	}
 519
 520	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
 521	len = urb->transfer_buffer_length - urb->actual_length;
 522
 523	/* Data underrun is special. For an allowed underrun we clear
 524	 * the error and continue as normal. For a forbidden underrun we
 525	 * finish the transfer immediately, except for control transfers,
 526	 * where we proceed with the STATUS stage and report the error
 527	 * there.
 528	 */
 529	if (cc == PTD_DATAUNDERRUN) {
 530		if (short_ok) {
 531			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
 532			    __func__, ep->num_req, short_ok ? "" : "not_",
 533			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 534			cc = PTD_CC_NOERROR;
 535			urbstat = 0;
 536		} else {
 537			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
 538			    __func__, ep->num_req,
 539			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
 540			    short_ok ? "" : "not_",
 541			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 542			/* save the data underrun error code for later and
 543			 * proceed with the status stage
 544			 */
 545			urb->actual_length += PTD_GET_COUNT(ptd);
 546			if (usb_pipecontrol(urb->pipe)) {
 547				ep->nextpid = USB_PID_ACK;
 548				BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 549
 550				if (urb->status == -EINPROGRESS)
 551					urb->status = cc_to_error[PTD_DATAUNDERRUN];
 552			} else {
 553				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
 554					      PTD_GET_TOGGLE(ptd));
 555				urbstat = cc_to_error[PTD_DATAUNDERRUN];
 556			}
 557			goto out;
 558		}
 559	}
 560
 561	if (cc != PTD_CC_NOERROR) {
 562		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
 563			urbstat = cc_to_error[cc];
 564			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
 565			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
 566			    ep->error_count);
 567		}
 568		goto out;
 569	}
 570
 571	switch (ep->nextpid) {
 572	case USB_PID_OUT:
 573		if (PTD_GET_COUNT(ptd) != ep->length)
 574			pr_err("%s: count=%d len=%d\n", __func__,
 575			   PTD_GET_COUNT(ptd), ep->length);
 576		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
 577		urb->actual_length += ep->length;
 578		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 579		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
 580		if (urb->actual_length == urb->transfer_buffer_length) {
 581			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 582			    ep->num_req, len, ep->maxpacket, urbstat);
 583			if (usb_pipecontrol(urb->pipe)) {
 584				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 585				    ep->num_req,
 586				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 587				ep->nextpid = USB_PID_ACK;
 588			} else {
 589				if (len % ep->maxpacket ||
 590				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
 591					urbstat = 0;
 592					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 593					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 594					    urbstat, len, ep->maxpacket, urb->actual_length);
 595				}
 596			}
 597		}
 598		break;
 599	case USB_PID_IN:
 600		len = PTD_GET_COUNT(ptd);
 601		BUG_ON(len > ep->length);
 602		urb->actual_length += len;
 603		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 604		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
 605		/* if transfer completed or (allowed) data underrun */
 606		if ((urb->transfer_buffer_length == urb->actual_length) ||
 607		    len % ep->maxpacket) {
 608			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 609			    ep->num_req, len, ep->maxpacket, urbstat);
 610			if (usb_pipecontrol(urb->pipe)) {
 611				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 612				    ep->num_req,
 613				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 614				ep->nextpid = USB_PID_ACK;
 615			} else {
 616				urbstat = 0;
 617				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 618				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 619				    urbstat, len, ep->maxpacket, urb->actual_length);
 620			}
 621		}
 622		break;
 623	case USB_PID_SETUP:
 624		if (urb->transfer_buffer_length == urb->actual_length) {
 625			ep->nextpid = USB_PID_ACK;
 626		} else if (usb_pipeout(urb->pipe)) {
 627			usb_settoggle(udev, 0, 1, 1);
 628			ep->nextpid = USB_PID_OUT;
 629		} else {
 630			usb_settoggle(udev, 0, 0, 1);
 631			ep->nextpid = USB_PID_IN;
 632		}
 633		break;
 634	case USB_PID_ACK:
 635		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
 636		    urbstat);
 637		WARN_ON(urbstat != -EINPROGRESS);
 638		urbstat = 0;
 639		ep->nextpid = 0;
 640		break;
 641	default:
 642		BUG_ON(1);
 643	}
 644
 645 out:
 646	if (urbstat != -EINPROGRESS) {
 647		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
 648		    ep, ep->num_req, urb, urbstat);
 649		finish_request(isp1362_hcd, ep, urb, urbstat);
 650	}
 651}
 652
 653static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
 654{
 655	struct isp1362_ep *ep;
 656	struct isp1362_ep *tmp;
 657
 658	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
 659		struct isp1362_ep_queue *epq =
 660			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 661		int index = ep->ptd_index;
 662
 663		BUG_ON(epq == NULL);
 664		if (index >= 0) {
 665			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
 666			BUG_ON(ep->num_ptds == 0);
 667			release_ptd_buffers(epq, ep);
 668		}
 669		if (!list_empty(&ep->hep->urb_list)) {
 670			struct urb *urb = get_urb(ep);
 671
 672			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
 673			    ep->num_req, ep);
 674			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
 675		}
 676		WARN_ON(list_empty(&ep->active));
 677		if (!list_empty(&ep->active)) {
 678			list_del_init(&ep->active);
 679			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
 680		}
 681		list_del_init(&ep->remove_list);
 682		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
 683	}
 684	DBG(1, "%s: Done\n", __func__);
 685}
 686
 687static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
 688{
 689	if (count > 0) {
 690		if (count < isp1362_hcd->atl_queue.ptd_count)
 691			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
 692		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
 693		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
 694		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 695	} else
 696		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 697}
 698
 699static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 700{
 701	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
 702	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 703	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
 704}
 705
 706static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
 707{
 708	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
 709	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
 710			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
 711}
 712
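/*
 * Prepare a PTD for the endpoint's next transaction, claim buffer space
 * in the queue and write the PTD to the chip. Returns -ENOMEM if no PTD
 * slot is free and -EOVERFLOW if the remaining buffer space is too small.
 */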
 713static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 714		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
 715{
 716	int index;
 717
 718	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
 719	index = claim_ptd_buffers(epq, ep, ep->length);
 720	if (index == -ENOMEM) {
 721		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
 722		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
 723		return index;
 724	} else if (index == -EOVERFLOW) {
 725		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
 726		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
 727		    epq->buf_map, epq->skip_map);
 728		return index;
 729	} else
 730		BUG_ON(index < 0);
 731	list_add_tail(&ep->active, &epq->active);
 732	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
 733	    ep, ep->num_req, ep->length, &epq->active);
 734	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
 735	    ep->ptd_offset, ep, ep->num_req);
 736	isp1362_write_ptd(isp1362_hcd, ep, epq);
 737	__clear_bit(ep->ptd_index, &epq->skip_map);
 738
 739	return 0;
 740}
 741
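/*
 * Walk the async schedule and submit a PTD for every endpoint that is
 * not already active. The schedule is rotated afterwards so that no
 * endpoint is starved, and ATL processing is (re)enabled whenever a PTD
 * was queued or a submission had to be deferred.
 */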
 742static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
 743{
 744	int ptd_count = 0;
 745	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
 746	struct isp1362_ep *ep;
 747	int defer = 0;
 748
 749	if (atomic_read(&epq->finishing)) {
 750		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 751		return;
 752	}
 753
 754	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
 755		struct urb *urb = get_urb(ep);
 756		int ret;
 757
 758		if (!list_empty(&ep->active)) {
 759			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
 760			continue;
 761		}
 762
 763		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
 764		    ep, ep->num_req);
 765
 766		ret = submit_req(isp1362_hcd, urb, ep, epq);
 767		if (ret == -ENOMEM) {
 768			defer = 1;
 769			break;
 770		} else if (ret == -EOVERFLOW) {
 771			defer = 1;
 772			continue;
 773		}
 774#ifdef BUGGY_PXA2XX_UDC_USBTEST
 775		defer = ep->nextpid == USB_PID_SETUP;
 776#endif
 777		ptd_count++;
 778	}
 779
 780	/* Avoid starving of endpoints */
 781	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
 782		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
 783		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
 784	}
 785	if (ptd_count || defer)
 786		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
 787
 788	epq->ptd_count += ptd_count;
 789	if (epq->ptd_count > epq->stat_maxptds) {
 790		epq->stat_maxptds = epq->ptd_count;
 791		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
 792	}
 793}
 794
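/*
 * Walk the periodic (interrupt) schedule and submit a PTD for every
 * endpoint that is not already active, then enable INTL processing if
 * anything was queued.
 */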
 795static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 796{
 797	int ptd_count = 0;
 798	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
 799	struct isp1362_ep *ep;
 800
 801	if (atomic_read(&epq->finishing)) {
 802		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 803		return;
 804	}
 805
 806	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
 807		struct urb *urb = get_urb(ep);
 808		int ret;
 809
 810		if (!list_empty(&ep->active)) {
 811			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
 812			    epq->name, ep);
 813			continue;
 814		}
 815
 816		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
 817		    epq->name, ep, ep->num_req);
 818		ret = submit_req(isp1362_hcd, urb, ep, epq);
 819		if (ret == -ENOMEM)
 820			break;
 821		else if (ret == -EOVERFLOW)
 822			continue;
 823		ptd_count++;
 824	}
 825
 826	if (ptd_count) {
 827		static int last_count;
 828
 829		if (ptd_count != last_count) {
 830			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
 831			last_count = ptd_count;
 832		}
 833		enable_intl_transfers(isp1362_hcd);
 834	}
 835
 836	epq->ptd_count += ptd_count;
 837	if (epq->ptd_count > epq->stat_maxptds)
 838		epq->stat_maxptds = epq->ptd_count;
 839}
 840
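/*
 * Return the buffer offset of the PTD following this endpoint's PTD in
 * an ISTL buffer, or -ENOMEM if it would extend beyond the end of the
 * buffer.
 */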
 841static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 842{
 843	u16 ptd_offset = ep->ptd_offset;
 844	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
 845
 846	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
 847	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
 848
 849	ptd_offset += num_ptds * epq->blk_size;
 850	if (ptd_offset < epq->buf_start + epq->buf_size)
 851		return ptd_offset;
 852	else
 853		return -ENOMEM;
 854}
 855
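/*
 * Fill the current ISTL buffer with PTDs for all isochronous URBs due
 * in the next frame. If the other ISTL buffer is not marked full, it is
 * filled as well for the following frame, making use of the chip's
 * double buffering.
 */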
 856static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
 857{
 858	int ptd_count = 0;
 859	int flip = isp1362_hcd->istl_flip;
 860	struct isp1362_ep_queue *epq;
 861	int ptd_offset;
 862	struct isp1362_ep *ep;
 863	struct isp1362_ep *tmp;
 864	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
 865
 866 fill2:
 867	epq = &isp1362_hcd->istl_queue[flip];
 868	if (atomic_read(&epq->finishing)) {
 869		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 870		return;
 871	}
 872
 873	if (!list_empty(&epq->active))
 874		return;
 875
 876	ptd_offset = epq->buf_start;
 877	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
 878		struct urb *urb = get_urb(ep);
 879		s16 diff = fno - (u16)urb->start_frame;
 880
 881		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
 882
 883		if (diff > urb->number_of_packets) {
 884			/* time frame for this URB has elapsed */
 885			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
 886			continue;
 887		} else if (diff < -1) {
 888			/* URB is not due in this frame or the next one.
 889			 * Comparing with '-1' instead of '0' accounts for double
 890			 * buffering in the ISP1362 which enables us to queue the PTD
 891			 * one frame ahead of time
 892			 */
 893		} else if (diff == -1) {
 894			/* submit PTDs that are due in the next frame */
 895			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
 896			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
 897			    epq->buf_start + epq->buf_size) {
 898				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
 899				    __func__, ep->length);
 900				continue;
 901			}
 902			ep->ptd_offset = ptd_offset;
 903			list_add_tail(&ep->active, &epq->active);
 904
 905			ptd_offset = next_ptd(epq, ep);
 906			if (ptd_offset < 0) {
 907				pr_warn("%s: req %d No more %s PTD buffers available\n",
 908					__func__, ep->num_req, epq->name);
 909				break;
 910			}
 911		}
 912	}
 913	list_for_each_entry(ep, &epq->active, active) {
 914		if (epq->active.next == &ep->active)
 915			ep->ptd.mps |= PTD_LAST_MSK;
 916		isp1362_write_ptd(isp1362_hcd, ep, epq);
 917		ptd_count++;
 918	}
 919
 920	if (ptd_count)
 921		enable_istl_transfers(isp1362_hcd, flip);
 922
 923	epq->ptd_count += ptd_count;
 924	if (epq->ptd_count > epq->stat_maxptds)
 925		epq->stat_maxptds = epq->ptd_count;
 926
 927	/* check whether the second ISTL buffer may also be filled */
 928	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
 929	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
 930		fno++;
 931		ptd_count = 0;
 932		flip = 1 - flip;
 933		goto fill2;
 934	}
 935}
 936
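/*
 * Post-process the PTDs of an ATL or INTL queue that the chip has
 * marked done: read back each PTD, release its buffer space and hand
 * the result to postproc_ep(). done_map is the bitmap read from the
 * corresponding DONE register.
 */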
 937static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
 938			     struct isp1362_ep_queue *epq)
 939{
 940	struct isp1362_ep *ep;
 941	struct isp1362_ep *tmp;
 942
 943	if (list_empty(&epq->active)) {
 944		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 945		return;
 946	}
 947
 948	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
 949
 950	atomic_inc(&epq->finishing);
 951	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
 952		int index = ep->ptd_index;
 953
 954		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
 955		    index, ep->ptd_offset);
 956
 957		BUG_ON(index < 0);
 958		if (__test_and_clear_bit(index, &done_map)) {
 959			isp1362_read_ptd(isp1362_hcd, ep, epq);
 960			epq->free_ptd = index;
 961			BUG_ON(ep->num_ptds == 0);
 962			release_ptd_buffers(epq, ep);
 963
 964			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
 965			    ep, ep->num_req);
 966			if (!list_empty(&ep->remove_list)) {
 967				list_del_init(&ep->remove_list);
 968				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
 969			}
 970			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
 971			    ep, ep->num_req);
 972			postproc_ep(isp1362_hcd, ep);
 973		}
 974		if (!done_map)
 975			break;
 976	}
 977	if (done_map)
 978		pr_warn("%s: done_map not clear: %08lx:%08lx\n",
 979			__func__, done_map, epq->skip_map);
 980	atomic_dec(&epq->finishing);
 981}
 982
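/*
 * Post-process an ISTL buffer once the chip has signalled completion.
 * ISO queues have no done map, so every PTD on the active list is read
 * back and handed to postproc_ep().
 */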
 983static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
 984{
 985	struct isp1362_ep *ep;
 986	struct isp1362_ep *tmp;
 987
 988	if (list_empty(&epq->active)) {
 989		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 990		return;
 991	}
 992
 993	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
 994
 995	atomic_inc(&epq->finishing);
 996	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
 997		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
 998
 999		isp1362_read_ptd(isp1362_hcd, ep, epq);
1000		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
1001		postproc_ep(isp1362_hcd, ep);
1002	}
1003	WARN_ON(epq->blk_size != 0);
1004	atomic_dec(&epq->finishing);
1005}
1006
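/*
 * Interrupt handler: acknowledge and service SOF, ISTL0/1, INTL, ATL,
 * OPR (OHCI), SUSPEND and CLKRDY events. Chip interrupts are masked
 * while the handler runs and re-enabled before returning.
 */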
1007static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1008{
1009	int handled = 0;
1010	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1011	u16 irqstat;
1012	u16 svc_mask;
1013
1014	spin_lock(&isp1362_hcd->lock);
1015
1016	BUG_ON(isp1362_hcd->irq_active++);
1017
1018	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1019
1020	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1021	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1022
1023	/* only handle interrupts that are currently enabled */
1024	irqstat &= isp1362_hcd->irqenb;
1025	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1026	svc_mask = irqstat;
1027
1028	if (irqstat & HCuPINT_SOF) {
1029		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1030		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1031		handled = 1;
1032		svc_mask &= ~HCuPINT_SOF;
1033		DBG(3, "%s: SOF\n", __func__);
1034		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1035		if (!list_empty(&isp1362_hcd->remove_list))
1036			finish_unlinks(isp1362_hcd);
1037		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1038			if (list_empty(&isp1362_hcd->atl_queue.active)) {
1039				start_atl_transfers(isp1362_hcd);
1040			} else {
1041				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1042				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1043						    isp1362_hcd->atl_queue.skip_map);
1044				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1045			}
1046		}
1047	}
1048
1049	if (irqstat & HCuPINT_ISTL0) {
1050		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1051		handled = 1;
1052		svc_mask &= ~HCuPINT_ISTL0;
1053		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1054		DBG(1, "%s: ISTL0\n", __func__);
1055		WARN_ON((int)!!isp1362_hcd->istl_flip);
1056		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1057			HCBUFSTAT_ISTL0_ACTIVE);
1058		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1059			HCBUFSTAT_ISTL0_DONE));
1060		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1061	}
1062
1063	if (irqstat & HCuPINT_ISTL1) {
1064		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1065		handled = 1;
1066		svc_mask &= ~HCuPINT_ISTL1;
1067		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1068		DBG(1, "%s: ISTL1\n", __func__);
1069		WARN_ON(!(int)isp1362_hcd->istl_flip);
1070		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1071			HCBUFSTAT_ISTL1_ACTIVE);
1072		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1073			HCBUFSTAT_ISTL1_DONE));
1074		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1075	}
1076
1077	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1078		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1079			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
1080		finish_iso_transfers(isp1362_hcd,
1081				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1082		start_iso_transfers(isp1362_hcd);
1083		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1084	}
1085
1086	if (irqstat & HCuPINT_INTL) {
1087		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1088		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1089		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1090
1091		DBG(2, "%s: INTL\n", __func__);
1092
1093		svc_mask &= ~HCuPINT_INTL;
1094
1095		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1096		if (~(done_map | skip_map) == 0)
1097			/* All PTDs are finished, disable INTL processing entirely */
1098			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1099
1100		handled = 1;
1101		WARN_ON(!done_map);
1102		if (done_map) {
1103			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1104			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1105			start_intl_transfers(isp1362_hcd);
1106		}
1107	}
1108
1109	if (irqstat & HCuPINT_ATL) {
1110		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1111		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1112		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1113
1114		DBG(2, "%s: ATL\n", __func__);
1115
1116		svc_mask &= ~HCuPINT_ATL;
1117
1118		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1119		if (~(done_map | skip_map) == 0)
1120			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1121		if (done_map) {
1122			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1123			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1124			start_atl_transfers(isp1362_hcd);
1125		}
1126		handled = 1;
1127	}
1128
1129	if (irqstat & HCuPINT_OPR) {
1130		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1131		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1132
1133		svc_mask &= ~HCuPINT_OPR;
1134		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1135		intstat &= isp1362_hcd->intenb;
1136		if (intstat & OHCI_INTR_UE) {
1137			pr_err("Unrecoverable error\n");
1138			/* FIXME: do a reset or other cleanup here */
1139		}
1140		if (intstat & OHCI_INTR_RHSC) {
1141			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1142			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1143			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1144		}
1145		if (intstat & OHCI_INTR_RD) {
1146			pr_info("%s: RESUME DETECTED\n", __func__);
1147			isp1362_show_reg(isp1362_hcd, HCCONTROL);
1148			usb_hcd_resume_root_hub(hcd);
1149		}
1150		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1151		irqstat &= ~HCuPINT_OPR;
1152		handled = 1;
1153	}
1154
1155	if (irqstat & HCuPINT_SUSP) {
1156		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1157		handled = 1;
1158		svc_mask &= ~HCuPINT_SUSP;
1159
1160		pr_info("%s: SUSPEND IRQ\n", __func__);
1161	}
1162
1163	if (irqstat & HCuPINT_CLKRDY) {
1164		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1165		handled = 1;
1166		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1167		svc_mask &= ~HCuPINT_CLKRDY;
1168		pr_info("%s: CLKRDY IRQ\n", __func__);
1169	}
1170
1171	if (svc_mask)
1172		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1173
1174	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1175	isp1362_hcd->irq_active--;
1176	spin_unlock(&isp1362_hcd->lock);
1177
1178	return IRQ_RETVAL(handled);
1179}
1180
1181/*-------------------------------------------------------------------------*/
1182
1183#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
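/*
 * Pick the least loaded branch of the periodic schedule for the given
 * interval that still has room for the additional load (in usecs per
 * frame), or return -ENOSPC if no branch can take it.
 */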
1184static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1185{
1186	int i, branch = -ENOSPC;
1187
1188	/* search for the least loaded schedule branch of that interval
1189	 * which has enough bandwidth left unreserved.
1190	 */
1191	for (i = 0; i < interval; i++) {
1192		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1193			int j;
1194
1195			for (j = i; j < PERIODIC_SIZE; j += interval) {
1196				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1197					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1198					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1199					break;
1200				}
1201			}
1202			if (j < PERIODIC_SIZE)
1203				continue;
1204			branch = i;
1205		}
1206	}
1207	return branch;
1208}
1209
1210/* NB! ALL the code above this point runs with isp1362_hcd->lock
1211 * held and IRQs off.
1212 */
1213
1214/*-------------------------------------------------------------------------*/
1215
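/*
 * Enqueue a URB: allocate per-endpoint state on first use, link the URB
 * to its endpoint, add the endpoint to the async or periodic schedule
 * and kick off transfer processing for the corresponding queue.
 */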
1216static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1217			       struct urb *urb,
1218			       gfp_t mem_flags)
1219{
1220	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1221	struct usb_device *udev = urb->dev;
1222	unsigned int pipe = urb->pipe;
1223	int is_out = !usb_pipein(pipe);
1224	int type = usb_pipetype(pipe);
1225	int epnum = usb_pipeendpoint(pipe);
1226	struct usb_host_endpoint *hep = urb->ep;
1227	struct isp1362_ep *ep = NULL;
1228	unsigned long flags;
1229	int retval = 0;
1230
1231	DBG(3, "%s: urb %p\n", __func__, urb);
1232
1233	if (type == PIPE_ISOCHRONOUS) {
1234		pr_err("Isochronous transfers not supported\n");
1235		return -ENOSPC;
1236	}
1237
1238	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1239		usb_pipedevice(pipe), epnum,
1240		is_out ? "out" : "in",
1241		usb_pipecontrol(pipe) ? "ctrl" :
1242			usb_pipeint(pipe) ? "int" :
1243			usb_pipebulk(pipe) ? "bulk" :
1244			"iso",
1245		urb->transfer_buffer_length,
1246		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1247		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1248		"short_ok" : "");
1249
1250	/* avoid all allocations within spinlocks: request or endpoint */
1251	if (!hep->hcpriv) {
1252		ep = kzalloc(sizeof *ep, mem_flags);
1253		if (!ep)
1254			return -ENOMEM;
1255	}
1256	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1257
1258	/* don't submit to a dead or disabled port */
1259	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1260	      USB_PORT_STAT_ENABLE) ||
1261	    !HC_IS_RUNNING(hcd->state)) {
1262		kfree(ep);
1263		retval = -ENODEV;
1264		goto fail_not_linked;
1265	}
1266
1267	retval = usb_hcd_link_urb_to_ep(hcd, urb);
1268	if (retval) {
1269		kfree(ep);
1270		goto fail_not_linked;
1271	}
1272
1273	if (hep->hcpriv) {
1274		ep = hep->hcpriv;
1275	} else {
1276		INIT_LIST_HEAD(&ep->schedule);
1277		INIT_LIST_HEAD(&ep->active);
1278		INIT_LIST_HEAD(&ep->remove_list);
1279		ep->udev = usb_get_dev(udev);
1280		ep->hep = hep;
1281		ep->epnum = epnum;
1282		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1283		ep->ptd_offset = -EINVAL;
1284		ep->ptd_index = -EINVAL;
1285		usb_settoggle(udev, epnum, is_out, 0);
1286
1287		if (type == PIPE_CONTROL)
1288			ep->nextpid = USB_PID_SETUP;
1289		else if (is_out)
1290			ep->nextpid = USB_PID_OUT;
1291		else
1292			ep->nextpid = USB_PID_IN;
1293
1294		switch (type) {
1295		case PIPE_ISOCHRONOUS:
1296		case PIPE_INTERRUPT:
1297			if (urb->interval > PERIODIC_SIZE)
1298				urb->interval = PERIODIC_SIZE;
1299			ep->interval = urb->interval;
1300			ep->branch = PERIODIC_SIZE;
1301			ep->load = usb_calc_bus_time(udev->speed, !is_out,
1302						     (type == PIPE_ISOCHRONOUS),
1303						     usb_maxpacket(udev, pipe, is_out)) / 1000;
1304			break;
1305		}
1306		hep->hcpriv = ep;
1307	}
1308	ep->num_req = isp1362_hcd->req_serial++;
1309
1310	/* maybe put endpoint into schedule */
1311	switch (type) {
1312	case PIPE_CONTROL:
1313	case PIPE_BULK:
1314		if (list_empty(&ep->schedule)) {
1315			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1316				__func__, ep, ep->num_req);
1317			list_add_tail(&ep->schedule, &isp1362_hcd->async);
1318		}
1319		break;
1320	case PIPE_ISOCHRONOUS:
1321	case PIPE_INTERRUPT:
1322		urb->interval = ep->interval;
1323
1324		/* URB submitted for an already existing EP */
1325		if (ep->branch < PERIODIC_SIZE)
1326			break;
1327
1328		retval = balance(isp1362_hcd, ep->interval, ep->load);
1329		if (retval < 0) {
1330			pr_err("%s: balance returned %d\n", __func__, retval);
1331			goto fail;
1332		}
1333		ep->branch = retval;
1334		retval = 0;
1335		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1336		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1337		    __func__, isp1362_hcd->fmindex, ep->branch,
1338		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1339		     ~(PERIODIC_SIZE - 1)) + ep->branch,
1340		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1341
1342		if (list_empty(&ep->schedule)) {
1343			if (type == PIPE_ISOCHRONOUS) {
1344				u16 frame = isp1362_hcd->fmindex;
1345
1346				frame += max_t(u16, 8, ep->interval);
1347				frame &= ~(ep->interval - 1);
1348				frame |= ep->branch;
1349				if (frame_before(frame, isp1362_hcd->fmindex))
1350					frame += ep->interval;
1351				urb->start_frame = frame;
1352
1353				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1354				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1355			} else {
1356				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1357				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1358			}
1359		} else
1360			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1361
1362		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1363		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1364		    isp1362_hcd->load[ep->branch] + ep->load);
1365		isp1362_hcd->load[ep->branch] += ep->load;
1366	}
1367
1368	urb->hcpriv = hep;
1369	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1370
1371	switch (type) {
1372	case PIPE_CONTROL:
1373	case PIPE_BULK:
1374		start_atl_transfers(isp1362_hcd);
1375		break;
1376	case PIPE_INTERRUPT:
1377		start_intl_transfers(isp1362_hcd);
1378		break;
1379	case PIPE_ISOCHRONOUS:
1380		start_iso_transfers(isp1362_hcd);
1381		break;
1382	default:
1383		BUG();
1384	}
1385 fail:
1386	if (retval)
1387		usb_hcd_unlink_urb_from_ep(hcd, urb);
1388
1389
1390 fail_not_linked:
1391	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1392	if (retval)
1393		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1394	return retval;
1395}
1396
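/*
 * Dequeue a URB: if its PTD is currently active in the chip, queue the
 * endpoint for removal and let the SOF interrupt give the URB back;
 * otherwise finish the URB immediately.
 */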
1397static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1398{
1399	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1400	struct usb_host_endpoint *hep;
1401	unsigned long flags;
1402	struct isp1362_ep *ep;
1403	int retval = 0;
1404
1405	DBG(3, "%s: urb %p\n", __func__, urb);
1406
1407	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1408	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1409	if (retval)
1410		goto done;
1411
1412	hep = urb->hcpriv;
1413
1414	if (!hep) {
1415		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1416		return -EIDRM;
1417	}
1418
1419	ep = hep->hcpriv;
1420	if (ep) {
1421		/* In front of queue? */
1422		if (ep->hep->urb_list.next == &urb->urb_list) {
1423			if (!list_empty(&ep->active)) {
1424				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1425				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1426				/* disable processing and queue PTD for removal */
1427				remove_ptd(isp1362_hcd, ep);
1428				urb = NULL;
1429			}
1430		}
1431		if (urb) {
1432			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1433			    ep->num_req);
1434			finish_request(isp1362_hcd, ep, urb, status);
1435		} else
1436			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1437	} else {
1438		pr_warn("%s: No EP in URB %p\n", __func__, urb);
1439		retval = -EINVAL;
1440	}
1441done:
1442	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1443
1444	DBG(3, "%s: exit\n", __func__);
1445
1446	return retval;
1447}
1448
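/*
 * Release an endpoint: if a PTD is still active in the chip, queue it
 * for removal and wait for the SOF interrupt to clean up before freeing
 * the endpoint state.
 */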
1449static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1450{
1451	struct isp1362_ep *ep = hep->hcpriv;
1452	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1453	unsigned long flags;
1454
1455	DBG(1, "%s: ep %p\n", __func__, ep);
1456	if (!ep)
1457		return;
1458	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1459	if (!list_empty(&hep->urb_list)) {
1460		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1461			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1462			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1463			remove_ptd(isp1362_hcd, ep);
1464			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1465		}
1466	}
1467	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1468	/* Wait for interrupt to clear out active list */
1469	while (!list_empty(&ep->active))
1470		msleep(1);
1471
1472	DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1473
1474	usb_put_dev(ep->udev);
1475	kfree(ep);
1476	hep->hcpriv = NULL;
1477}
1478
1479static int isp1362_get_frame(struct usb_hcd *hcd)
1480{
1481	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1482	u32 fmnum;
1483	unsigned long flags;
1484
1485	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1486	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1487	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1488
1489	return (int)fmnum;
1490}
1491
1492/*-------------------------------------------------------------------------*/
1493
1494/* Adapted from ohci-hub.c */
1495static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1496{
1497	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1498	int ports, i, changed = 0;
1499	unsigned long flags;
1500
1501	if (!HC_IS_RUNNING(hcd->state))
1502		return -ESHUTDOWN;
1503
1504	/* Report no status change now if we are scheduled to be
1505	 * called later */
1506	if (timer_pending(&hcd->rh_timer))
1507		return 0;
1508
1509	ports = isp1362_hcd->rhdesca & RH_A_NDP;
1510	BUG_ON(ports > 2);
1511
1512	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1513	/* init status */
1514	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1515		buf[0] = changed = 1;
1516	else
1517		buf[0] = 0;
1518
1519	for (i = 0; i < ports; i++) {
1520		u32 status = isp1362_hcd->rhport[i];
1521
1522		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1523			      RH_PS_OCIC | RH_PS_PRSC)) {
1524			changed = 1;
1525			buf[0] |= 1 << (i + 1);
1526			continue;
1527		}
1528
1529		if (!(status & RH_PS_CCS))
1530			continue;
1531	}
1532	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1533	return changed;
1534}
1535
1536static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1537				   struct usb_hub_descriptor *desc)
1538{
1539	u32 reg = isp1362_hcd->rhdesca;
1540
1541	DBG(3, "%s: enter\n", __func__);
1542
1543	desc->bDescriptorType = USB_DT_HUB;
1544	desc->bDescLength = 9;
1545	desc->bHubContrCurrent = 0;
1546	desc->bNbrPorts = reg & 0x3;
1547	/* Power switching, device type, overcurrent. */
1548	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
1549						(HUB_CHAR_LPSM |
1550						 HUB_CHAR_COMPOUND |
1551						 HUB_CHAR_OCPM));
1552	DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
1553			desc->wHubCharacteristics);
1554	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1555	/* ports removable, and legacy PortPwrCtrlMask */
1556	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1557	desc->u.hs.DeviceRemovable[1] = ~0;
1558
1559	DBG(3, "%s: exit\n", __func__);
1560}
1561
1562/* Adapted from ohci-hub.c */
1563static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1564			       u16 wIndex, char *buf, u16 wLength)
1565{
1566	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1567	int retval = 0;
1568	unsigned long flags;
1569	unsigned long t1;
1570	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1571	u32 tmp = 0;
1572
1573	switch (typeReq) {
1574	case ClearHubFeature:
1575		DBG(0, "ClearHubFeature: ");
1576		switch (wValue) {
1577		case C_HUB_OVER_CURRENT:
1578			DBG(0, "C_HUB_OVER_CURRENT\n");
1579			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1580			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1581			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1582			break;
1583		case C_HUB_LOCAL_POWER:
1584			DBG(0, "C_HUB_LOCAL_POWER\n");
1585			break;
1586		default:
1587			goto error;
1588		}
1589		break;
1590	case SetHubFeature:
1591		DBG(0, "SetHubFeature: ");
1592		switch (wValue) {
1593		case C_HUB_OVER_CURRENT:
1594		case C_HUB_LOCAL_POWER:
1595			DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1596			break;
1597		default:
1598			goto error;
1599		}
1600		break;
1601	case GetHubDescriptor:
1602		DBG(0, "GetHubDescriptor\n");
1603		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1604		break;
1605	case GetHubStatus:
1606		DBG(0, "GetHubStatus\n");
1607		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1608		break;
1609	case GetPortStatus:
1610#ifndef VERBOSE
1611		DBG(0, "GetPortStatus\n");
1612#endif
1613		if (!wIndex || wIndex > ports)
1614			goto error;
1615		tmp = isp1362_hcd->rhport[--wIndex];
1616		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1617		break;
1618	case ClearPortFeature:
1619		DBG(0, "ClearPortFeature: ");
1620		if (!wIndex || wIndex > ports)
1621			goto error;
1622		wIndex--;
1623
1624		switch (wValue) {
1625		case USB_PORT_FEAT_ENABLE:
1626			DBG(0, "USB_PORT_FEAT_ENABLE\n");
1627			tmp = RH_PS_CCS;
1628			break;
1629		case USB_PORT_FEAT_C_ENABLE:
1630			DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1631			tmp = RH_PS_PESC;
1632			break;
1633		case USB_PORT_FEAT_SUSPEND:
1634			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1635			tmp = RH_PS_POCI;
1636			break;
1637		case USB_PORT_FEAT_C_SUSPEND:
1638			DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1639			tmp = RH_PS_PSSC;
1640			break;
1641		case USB_PORT_FEAT_POWER:
1642			DBG(0, "USB_PORT_FEAT_POWER\n");
1643			tmp = RH_PS_LSDA;
1644
1645			break;
1646		case USB_PORT_FEAT_C_CONNECTION:
1647			DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1648			tmp = RH_PS_CSC;
1649			break;
1650		case USB_PORT_FEAT_C_OVER_CURRENT:
1651			DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1652			tmp = RH_PS_OCIC;
1653			break;
1654		case USB_PORT_FEAT_C_RESET:
1655			DBG(0, "USB_PORT_FEAT_C_RESET\n");
1656			tmp = RH_PS_PRSC;
1657			break;
1658		default:
1659			goto error;
1660		}
1661
1662		spin_lock_irqsave(&isp1362_hcd->lock, flags);
1663		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1664		isp1362_hcd->rhport[wIndex] =
1665			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1666		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1667		break;
1668	case SetPortFeature:
1669		DBG(0, "SetPortFeature: ");
1670		if (!wIndex || wIndex > ports)
1671			goto error;
1672		wIndex--;
1673		switch (wValue) {
1674		case USB_PORT_FEAT_SUSPEND:
1675			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1676			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1677			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1678			isp1362_hcd->rhport[wIndex] =
1679				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1680			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1681			break;
1682		case USB_PORT_FEAT_POWER:
1683			DBG(0, "USB_PORT_FEAT_POWER\n");
1684			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1685			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1686			isp1362_hcd->rhport[wIndex] =
1687				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1688			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1689			break;
1690		case USB_PORT_FEAT_RESET:
1691			DBG(0, "USB_PORT_FEAT_RESET\n");
1692			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1693
1694			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1695			while (time_before(jiffies, t1)) {
1696				/* spin until any current reset finishes */
1697				for (;;) {
1698					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1699					if (!(tmp & RH_PS_PRS))
1700						break;
1701					udelay(500);
1702				}
1703				if (!(tmp & RH_PS_CCS))
1704					break;
1705				/* Reset lasts 10 ms (per the datasheet) */
1706				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1707
1708				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1709				msleep(10);
1710				spin_lock_irqsave(&isp1362_hcd->lock, flags);
1711			}
1712
1713			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1714									 HCRHPORT1 + wIndex);
1715			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1716			break;
1717		default:
1718			goto error;
1719		}
1720		break;
1721
1722	default:
1723 error:
1724		/* "protocol stall" on error */
1725		DBG(0, "PROTOCOL STALL\n");
1726		retval = -EPIPE;
1727	}
1728
1729	return retval;
1730}
1731
1732#ifdef	CONFIG_PM
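/*
 * Root hub suspend: stop ATL/INTL/ISTL processing, wait for the current
 * frame to finish, flush any completed transfers and put the controller
 * into the OHCI USB_SUSPEND state.
 */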
1733static int isp1362_bus_suspend(struct usb_hcd *hcd)
1734{
1735	int status = 0;
1736	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1737	unsigned long flags;
1738
1739	if (time_before(jiffies, isp1362_hcd->next_statechange))
1740		msleep(5);
1741
1742	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1743
1744	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1745	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1746	case OHCI_USB_RESUME:
1747		DBG(0, "%s: resume/suspend?\n", __func__);
1748		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1749		isp1362_hcd->hc_control |= OHCI_USB_RESET;
1750		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1751		/* FALL THROUGH */
1752	case OHCI_USB_RESET:
1753		status = -EBUSY;
1754		pr_warn("%s: needs reinit!\n", __func__);
1755		goto done;
1756	case OHCI_USB_SUSPEND:
1757		pr_warn("%s: already suspended?\n", __func__);
1758		goto done;
1759	}
1760	DBG(0, "%s: suspend root hub\n", __func__);
1761
1762	/* First stop any processing */
1763	hcd->state = HC_STATE_QUIESCING;
1764	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1765	    !list_empty(&isp1362_hcd->intl_queue.active) ||
1766	    !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1767	    !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1768		int limit;
1769
1770		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1771		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1772		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1773		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1774		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1775
1776		DBG(0, "%s: stopping schedules ...\n", __func__);
1777		limit = 2000;
1778		while (limit > 0) {
1779			udelay(250);
1780			limit -= 250;
1781			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1782				break;
1783		}
1784		mdelay(7);
1785		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1786			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1787			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1788		}
1789		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1790			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1791			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1792		}
1793		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1794			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1795		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1796			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1797	}
1798	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1799		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1800	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1801			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1802
1803	/* Suspend hub */
1804	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1805	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1806	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1807	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1808
1809#if 1
1810	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1811	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1812		pr_err("%s: controller won't suspend %08x\n", __func__,
1813		    isp1362_hcd->hc_control);
1814		status = -EBUSY;
1815	} else
1816#endif
1817	{
1818		/* no resumes until devices finish suspending */
1819		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1820	}
1821done:
1822	if (status == 0) {
1823		hcd->state = HC_STATE_SUSPENDED;
1824		DBG(0, "%s: HCD suspended: %08x\n", __func__,
1825		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1826	}
1827	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1828	return status;
1829}
1830
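/*
 * Root hub resume: bring the controller out of USB_SUSPEND, resume all
 * suspended ports and switch back to USB_OPER after the mandatory
 * recovery time.
 */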
1831static int isp1362_bus_resume(struct usb_hcd *hcd)
1832{
1833	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1834	u32 port;
1835	unsigned long flags;
1836	int status = -EINPROGRESS;
1837
1838	if (time_before(jiffies, isp1362_hcd->next_statechange))
1839		msleep(5);
1840
1841	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1842	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1843	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1844	if (hcd->state == HC_STATE_RESUMING) {
1845		pr_warn("%s: duplicate resume\n", __func__);
1846		status = 0;
1847	} else
1848		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1849		case OHCI_USB_SUSPEND:
1850			DBG(0, "%s: resume root hub\n", __func__);
1851			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1852			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1853			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1854			break;
1855		case OHCI_USB_RESUME:
1856			/* HCFS changes sometime after INTR_RD */
1857			DBG(0, "%s: remote wakeup\n", __func__);
1858			break;
1859		case OHCI_USB_OPER:
1860			DBG(0, "%s: odd resume\n", __func__);
1861			status = 0;
1862			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1863			break;
1864		default:		/* RESET, we lost power */
1865			DBG(0, "%s: root hub hardware reset\n", __func__);
1866			status = -EBUSY;
1867		}
1868	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1869	if (status == -EBUSY) {
1870		DBG(0, "%s: Restarting HC\n", __func__);
1871		isp1362_hc_stop(hcd);
1872		return isp1362_hc_start(hcd);
1873	}
1874	if (status != -EINPROGRESS)
1875		return status;
1876	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1877	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1878	while (port--) {
1879		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1880
1881		/* force global, not selective, resume */
1882		if (!(stat & RH_PS_PSS)) {
1883			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1884			continue;
1885		}
1886		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1887		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1888	}
1889	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1890
1891	/* Some controllers (Lucent) need extra-long delays */
1892	hcd->state = HC_STATE_RESUMING;
1893	mdelay(20 /* usb 11.5.1.10 */ + 15);
1894
1895	isp1362_hcd->hc_control = OHCI_USB_OPER;
1896	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1897	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1898	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1899	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1900	/* TRSMRCY */
1901	msleep(10);
1902
1903	/* keep it alive for ~5x suspend + resume costs */
1904	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1905
1906	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1907	hcd->state = HC_STATE_RUNNING;
1908	return 0;
1909}
1910#else
1911#define	isp1362_bus_suspend	NULL
1912#define	isp1362_bus_resume	NULL
1913#endif
1914
1915/*-------------------------------------------------------------------------*/
1916
1917static void dump_irq(struct seq_file *s, char *label, u16 mask)
1918{
1919	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1920		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1921		   mask & HCuPINT_SUSP ? " susp" : "",
1922		   mask & HCuPINT_OPR ? " opr" : "",
1923		   mask & HCuPINT_EOT ? " eot" : "",
1924		   mask & HCuPINT_ATL ? " atl" : "",
1925		   mask & HCuPINT_SOF ? " sof" : "");
1926}
1927
1928static void dump_int(struct seq_file *s, char *label, u32 mask)
1929{
1930	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1931		   mask & OHCI_INTR_MIE ? " MIE" : "",
1932		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
1933		   mask & OHCI_INTR_FNO ? " fno" : "",
1934		   mask & OHCI_INTR_UE ? " ue" : "",
1935		   mask & OHCI_INTR_RD ? " rd" : "",
1936		   mask & OHCI_INTR_SF ? " sof" : "",
1937		   mask & OHCI_INTR_SO ? " so" : "");
1938}
1939
1940static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1941{
1942	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1943		   mask & OHCI_CTRL_RWC ? " rwc" : "",
1944		   mask & OHCI_CTRL_RWE ? " rwe" : "",
1945		   ({
1946			   char *hcfs;
1947			   switch (mask & OHCI_CTRL_HCFS) {
1948			   case OHCI_USB_OPER:
1949				   hcfs = " oper";
1950				   break;
1951			   case OHCI_USB_RESET:
1952				   hcfs = " reset";
1953				   break;
1954			   case OHCI_USB_RESUME:
1955				   hcfs = " resume";
1956				   break;
1957			   case OHCI_USB_SUSPEND:
1958				   hcfs = " suspend";
1959				   break;
1960			   default:
1961				   hcfs = " ?";
1962			   }
1963			   hcfs;
1964		   }));
1965}
1966
1967static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1968{
1969	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1970		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
1971	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1972		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1973	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1974		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1975	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1976		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1977	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1978		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
1979	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1980		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1981	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1982		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
1983	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
1984		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
1985	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
1986		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
1987	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
1988		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
1989	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
1990		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
1991	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
1992		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
1993	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
1994		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
1995	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
1996		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
1997	seq_printf(s, "\n");
1998	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
1999		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2000	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2001		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2002	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2003		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2004	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2005		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
2006	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2007		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2008	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2009		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2010	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2011		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2012	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2013		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2014	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2015		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2016#if 0
2017	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
2018		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2019#endif
2020	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2021		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2022	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2023		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2024	seq_printf(s, "\n");
2025	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2026		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2027	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2028		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2029	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2030		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2031	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2032		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2033	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2034		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2035	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2036		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2037	seq_printf(s, "\n");
2038	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2039		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2040	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2041		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2042#if 0
2043	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2044		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2045#endif
2046	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2047		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2048	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2049		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2050	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2051		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2052	seq_printf(s, "\n");
2053	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2054		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2055	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2056		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2057}
2058
2059static int isp1362_show(struct seq_file *s, void *unused)
2060{
2061	struct isp1362_hcd *isp1362_hcd = s->private;
2062	struct isp1362_ep *ep;
2063	int i;
2064
2065	seq_printf(s, "%s\n%s version %s\n",
2066		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2067
2068	/* collect statistics to help estimate potential win for
2069	 * DMA engines that care about alignment (PXA)
2070	 */
2071	seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2072		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2073		   isp1362_hcd->stat2, isp1362_hcd->stat1);
2074	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2075	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2076	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2077		   max(isp1362_hcd->istl_queue[0].stat_maxptds,
2078		       isp1362_hcd->istl_queue[1].stat_maxptds));
2079
2080	/* FIXME: don't show the following in suspended state */
2081	spin_lock_irq(&isp1362_hcd->lock);
2082
2083	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2084	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2085	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2086	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2087	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2088
2089	for (i = 0; i < NUM_ISP1362_IRQS; i++)
2090		if (isp1362_hcd->irq_stat[i])
2091			seq_printf(s, "%-15s: %d\n",
2092				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2093
2094	dump_regs(s, isp1362_hcd);
2095	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2096		struct urb *urb;
2097
2098		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2099			   ({
2100				   char *s;
2101				   switch (ep->nextpid) {
2102				   case USB_PID_IN:
2103					   s = "in";
2104					   break;
2105				   case USB_PID_OUT:
2106					   s = "out";
2107					   break;
2108				   case USB_PID_SETUP:
2109					   s = "setup";
2110					   break;
2111				   case USB_PID_ACK:
2112					   s = "status";
2113					   break;
2114				   default:
2115					   s = "?";
2116					   break;
2117				   }
2118				   s;}), ep->maxpacket);
2119		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2120			seq_printf(s, "  urb%p, %d/%d\n", urb,
2121				   urb->actual_length,
2122				   urb->transfer_buffer_length);
2123		}
2124	}
2125	if (!list_empty(&isp1362_hcd->async))
2126		seq_printf(s, "\n");
2127	dump_ptd_queue(&isp1362_hcd->atl_queue);
2128
2129	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2130
2131	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2132		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2133			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2134
2135		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2136			   ep->interval, ep,
2137			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2138			   ep->udev->devnum, ep->epnum,
2139			   (ep->epnum == 0) ? "" :
2140			   ((ep->nextpid == USB_PID_IN) ?
2141			    "in" : "out"), ep->maxpacket);
2142	}
2143	dump_ptd_queue(&isp1362_hcd->intl_queue);
2144
2145	seq_printf(s, "ISO:\n");
2146
2147	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2148		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2149			   ep->interval, ep,
2150			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2151			   ep->udev->devnum, ep->epnum,
2152			   (ep->epnum == 0) ? "" :
2153			   ((ep->nextpid == USB_PID_IN) ?
2154			    "in" : "out"), ep->maxpacket);
2155	}
2156
2157	spin_unlock_irq(&isp1362_hcd->lock);
2158	seq_printf(s, "\n");
2159
2160	return 0;
2161}
2162DEFINE_SHOW_ATTRIBUTE(isp1362);
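/*
 * DEFINE_SHOW_ATTRIBUTE(isp1362) expands to isp1362_open() and the
 * isp1362_fops file_operations wrapping isp1362_show() above; the fops
 * are handed to debugfs by create_debug_file() below.
 */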
2163
2164/* expect just one isp1362_hcd per system */
2165static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2166{
2167	isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
2168						      usb_debug_root,
2169						      isp1362_hcd,
2170						      &isp1362_fops);
2171}
2172
2173static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2174{
2175	debugfs_remove(isp1362_hcd->debug_file);
2176}
2177
2178/*-------------------------------------------------------------------------*/
2179
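/*
 * Software reset: write the magic value to HCSWRES, request an OHCI
 * host controller reset via HCCMDSTAT and poll (for up to ~20 ms) until
 * the OHCI_HCR bit clears.  Both callers hold isp1362_hcd->lock; the
 * isp1362_sw_reset() wrapper below takes it explicitly.
 */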
2180static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2181{
2182	int tmp = 20;
2183
2184	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2185	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2186	while (--tmp) {
2187		mdelay(1);
2188		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2189			break;
2190	}
2191	if (!tmp)
2192		pr_err("Software reset timeout\n");
2193}
2194
2195static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2196{
2197	unsigned long flags;
2198
2199	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2200	__isp1362_sw_reset(isp1362_hcd);
2201	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2202}
2203
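/*
 * Partition the chip's internal buffer memory (ISP1362_BUF_SIZE bytes):
 * two ISTL (isochronous) buffers of istl_size/2 each at offset 0,
 * followed by ISP1362_INTL_BUFFERS interrupt blocks, with the remainder
 * split into at most 32 ATL (async) blocks.  Each INTL/ATL block
 * carries a PTD header of PTD_HEADER_SIZE bytes in front of the
 * payload.
 */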
2204static int isp1362_mem_config(struct usb_hcd *hcd)
2205{
2206	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2207	unsigned long flags;
2208	u32 total;
2209	u16 istl_size = ISP1362_ISTL_BUFSIZE;
2210	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2211	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2212	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2213	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2214	u16 atl_size;
2215	int i;
2216
2217	WARN_ON(istl_size & 3);
2218	WARN_ON(atl_blksize & 3);
2219	WARN_ON(intl_blksize & 3);
2220	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2221	WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2222
2223	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2224	if (atl_buffers > 32)
2225		atl_buffers = 32;
2226	atl_size = atl_buffers * atl_blksize;
2227	total = atl_size + intl_size + istl_size;
2228	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2229	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2230		 istl_size / 2, istl_size, 0, istl_size / 2);
2231	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2232		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2233		 intl_size, istl_size);
2234	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2235		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2236		 atl_size, istl_size + intl_size);
2237	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2238		 ISP1362_BUF_SIZE - total);
2239
2240	if (total > ISP1362_BUF_SIZE) {
2241		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2242			__func__, total, ISP1362_BUF_SIZE);
2243		return -ENOMEM;
2244	}
2245
2246	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2247
2248	for (i = 0; i < 2; i++) {
2249		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2250		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2251		isp1362_hcd->istl_queue[i].blk_size = 4;
2252		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2253		snprintf(isp1362_hcd->istl_queue[i].name,
2254			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2255		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2256		     isp1362_hcd->istl_queue[i].name,
2257		     isp1362_hcd->istl_queue[i].buf_start,
2258		     isp1362_hcd->istl_queue[i].buf_size);
2259	}
2260	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2261
2262	isp1362_hcd->intl_queue.buf_start = istl_size;
2263	isp1362_hcd->intl_queue.buf_size = intl_size;
2264	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2265	isp1362_hcd->intl_queue.blk_size = intl_blksize;
2266	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2267	isp1362_hcd->intl_queue.skip_map = ~0;
2268	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2269
2270	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2271			    isp1362_hcd->intl_queue.buf_size);
2272	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2273			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2274	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2275	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2276			    1 << (ISP1362_INTL_BUFFERS - 1));
2277
2278	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2279	isp1362_hcd->atl_queue.buf_size = atl_size;
2280	isp1362_hcd->atl_queue.buf_count = atl_buffers;
2281	isp1362_hcd->atl_queue.blk_size = atl_blksize;
2282	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2283	isp1362_hcd->atl_queue.skip_map = ~0;
2284	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2285
2286	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2287			    isp1362_hcd->atl_queue.buf_size);
2288	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2289			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2290	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2291	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2292			    1 << (atl_buffers - 1));
2293
2294	snprintf(isp1362_hcd->atl_queue.name,
2295		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2296	snprintf(isp1362_hcd->intl_queue.name,
2297		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2298	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2299	     isp1362_hcd->intl_queue.name,
2300	     isp1362_hcd->intl_queue.buf_start,
2301	     ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2302	     isp1362_hcd->intl_queue.buf_size);
2303	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2304	     isp1362_hcd->atl_queue.name,
2305	     isp1362_hcd->atl_queue.buf_start,
2306	     atl_buffers, isp1362_hcd->atl_queue.blk_size,
2307	     isp1362_hcd->atl_queue.buf_size);
2308
2309	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2310
2311	return 0;
2312}
2313
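/*
 * Reset the controller: use the board-supplied reset/clock hooks when
 * available, otherwise fall back to the software reset above, then wait
 * up to 100 ms for the chip to signal a stable clock (HCuPINT_CLKRDY).
 */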
2314static int isp1362_hc_reset(struct usb_hcd *hcd)
2315{
2316	int ret = 0;
2317	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2318	unsigned long t;
2319	unsigned long timeout = 100;
2320	unsigned long flags;
2321	int clkrdy = 0;
2322
2323	pr_debug("%s:\n", __func__);
2324
2325	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2326		isp1362_hcd->board->reset(hcd->self.controller, 1);
2327		msleep(20);
2328		if (isp1362_hcd->board->clock)
2329			isp1362_hcd->board->clock(hcd->self.controller, 1);
2330		isp1362_hcd->board->reset(hcd->self.controller, 0);
2331	} else
2332		isp1362_sw_reset(isp1362_hcd);
2333
2334	/* chip has been reset. First we need to see a clock */
2335	t = jiffies + msecs_to_jiffies(timeout);
2336	while (!clkrdy && time_before_eq(jiffies, t)) {
2337		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2338		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2339		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2340		if (!clkrdy)
2341			msleep(4);
2342	}
2343
2344	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2345	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2346	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2347	if (!clkrdy) {
2348		pr_err("Clock not ready after %lums\n", timeout);
2349		ret = -ENODEV;
2350	}
2351	return ret;
2352}
2353
2354static void isp1362_hc_stop(struct usb_hcd *hcd)
2355{
2356	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2357	unsigned long flags;
2358	u32 tmp;
2359
2360	pr_debug("%s:\n", __func__);
2361
2362	del_timer_sync(&hcd->rh_timer);
2363
2364	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2365
2366	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2367
2368	/* Switch off power for all ports */
2369	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2370	tmp &= ~(RH_A_NPS | RH_A_PSM);
2371	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2372	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2373
2374	/* Reset the chip */
2375	if (isp1362_hcd->board && isp1362_hcd->board->reset)
2376		isp1362_hcd->board->reset(hcd->self.controller, 1);
2377	else
2378		__isp1362_sw_reset(isp1362_hcd);
2379
2380	if (isp1362_hcd->board && isp1362_hcd->board->clock)
2381		isp1362_hcd->board->clock(hcd->self.controller, 0);
2382
2383	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2384}
2385
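/*
 * Optional check of the on-chip buffer memory (enabled via
 * CHIP_BUFFER_TEST; see the access timing notes at the top of this
 * file): write known patterns at varying sizes, alignments and offsets
 * and read them back, returning -ENODEV on persistent mismatches.
 */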
2386#ifdef CHIP_BUFFER_TEST
2387static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2388{
2389	int ret = 0;
2390	u16 *ref;
2391	unsigned long flags;
2392
2393	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2394	if (ref) {
2395		int offset;
2396		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2397
2398		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2399			ref[offset] = ~offset;
2400			tst[offset] = offset;
2401		}
2402
2403		for (offset = 0; offset < 4; offset++) {
2404			int j;
2405
2406			for (j = 0; j < 8; j++) {
2407				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2408				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2409				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2410				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2411
2412				if (memcmp(ref, tst, j)) {
2413					ret = -ENODEV;
2414					pr_err("%s: memory check with %d byte offset %d failed\n",
2415					    __func__, j, offset);
2416					dump_data((u8 *)ref + offset, j);
2417					dump_data((u8 *)tst + offset, j);
2418				}
2419			}
2420		}
2421
2422		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2423		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2424		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2425		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2426
2427		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2428			ret = -ENODEV;
2429			pr_err("%s: memory check failed\n", __func__);
2430			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2431		}
2432
2433		for (offset = 0; offset < 256; offset++) {
2434			int test_size = 0;
2435
2436			yield();
2437
2438			memset(tst, 0, ISP1362_BUF_SIZE);
2439			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2440			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2441			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2442			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2443			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2444				   ISP1362_BUF_SIZE / 2)) {
2445				pr_err("%s: Failed to clear buffer\n", __func__);
2446				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2447				break;
2448			}
2449			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2450			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2451			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2452					     offset * 2 + PTD_HEADER_SIZE, test_size);
2453			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2454					    PTD_HEADER_SIZE + test_size);
2455			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2456			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2457				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2458				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2459				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2460				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2461						    PTD_HEADER_SIZE + test_size);
2462				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2463				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2464					ret = -ENODEV;
2465					pr_err("%s: memory check with offset %02x failed\n",
2466					    __func__, offset);
2467					break;
2468				}
2469				pr_warn("%s: memory check with offset %02x ok after second read\n",
2470					__func__, offset);
2471			}
2472		}
2473		kfree(ref);
2474	}
2475	return ret;
2476}
2477#endif
2478
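/*
 * Bring the controller up: verify the chip ID, optionally run the
 * buffer test, program HCHWCFG/HCDMACFG from the platform data,
 * partition the buffer memory, set up the root hub and frame interval,
 * then enable interrupts and switch the OHCI state machine to
 * OHCI_USB_OPER.
 */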
2479static int isp1362_hc_start(struct usb_hcd *hcd)
2480{
2481	int ret;
2482	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2483	struct isp1362_platform_data *board = isp1362_hcd->board;
2484	u16 hwcfg;
2485	u16 chipid;
2486	unsigned long flags;
2487
2488	pr_debug("%s:\n", __func__);
2489
2490	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2491	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2492	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2493
2494	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2495		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2496		return -ENODEV;
2497	}
2498
2499#ifdef CHIP_BUFFER_TEST
2500	ret = isp1362_chip_test(isp1362_hcd);
2501	if (ret)
2502		return -ENODEV;
2503#endif
2504	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2505	/* clear interrupt status and disable all interrupt sources */
2506	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2507	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2508
2509	/* HW conf */
2510	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2511	if (board->sel15Kres)
2512		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2513			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2514	if (board->clknotstop)
2515		hwcfg |= HCHWCFG_CLKNOTSTOP;
2516	if (board->oc_enable)
2517		hwcfg |= HCHWCFG_ANALOG_OC;
2518	if (board->int_act_high)
2519		hwcfg |= HCHWCFG_INT_POL;
2520	if (board->int_edge_triggered)
2521		hwcfg |= HCHWCFG_INT_TRIGGER;
2522	if (board->dreq_act_high)
2523		hwcfg |= HCHWCFG_DREQ_POL;
2524	if (board->dack_act_high)
2525		hwcfg |= HCHWCFG_DACK_POL;
2526	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2527	isp1362_show_reg(isp1362_hcd, HCHWCFG);
2528	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2529	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2530
2531	ret = isp1362_mem_config(hcd);
2532	if (ret)
2533		return ret;
2534
2535	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2536
2537	/* Root hub conf */
2538	isp1362_hcd->rhdesca = 0;
2539	if (board->no_power_switching)
2540		isp1362_hcd->rhdesca |= RH_A_NPS;
2541	if (board->power_switching_mode)
2542		isp1362_hcd->rhdesca |= RH_A_PSM;
2543	if (board->potpg)
2544		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2545	else
2546		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2547
2548	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2549	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2550	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2551
2552	isp1362_hcd->rhdescb = RH_B_PPCM;
2553	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2554	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2555
2556	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2557	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2558	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2559
2560	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2561
2562	isp1362_hcd->hc_control = OHCI_USB_OPER;
2563	hcd->state = HC_STATE_RUNNING;
2564
2565	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2566	/* Set up interrupts */
2567	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2568	isp1362_hcd->intenb |= OHCI_INTR_RD;
2569	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2570	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2571	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2572
2573	/* Go operational */
2574	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2575	/* enable global power */
2576	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2577
2578	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2579
2580	return 0;
2581}
2582
2583/*-------------------------------------------------------------------------*/
2584
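/* Glue that exposes the routines above to the generic USB HCD core. */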
2585static const struct hc_driver isp1362_hc_driver = {
2586	.description =		hcd_name,
2587	.product_desc =		"ISP1362 Host Controller",
2588	.hcd_priv_size =	sizeof(struct isp1362_hcd),
2589
2590	.irq =			isp1362_irq,
2591	.flags =		HCD_USB11 | HCD_MEMORY,
2592
2593	.reset =		isp1362_hc_reset,
2594	.start =		isp1362_hc_start,
2595	.stop =			isp1362_hc_stop,
2596
2597	.urb_enqueue =		isp1362_urb_enqueue,
2598	.urb_dequeue =		isp1362_urb_dequeue,
2599	.endpoint_disable =	isp1362_endpoint_disable,
2600
2601	.get_frame_number =	isp1362_get_frame,
2602
2603	.hub_status_data =	isp1362_hub_status_data,
2604	.hub_control =		isp1362_hub_control,
2605	.bus_suspend =		isp1362_bus_suspend,
2606	.bus_resume =		isp1362_bus_resume,
2607};
2608
2609/*-------------------------------------------------------------------------*/
2610
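/* Tear down in reverse order of probe: debugfs entry first, then the HCD. */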
2611static int isp1362_remove(struct platform_device *pdev)
2612{
2613	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2614	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2615
2616	remove_debug_file(isp1362_hcd);
2617	DBG(0, "%s: Removing HCD\n", __func__);
2618	usb_remove_hcd(hcd);
2619	DBG(0, "%s: put_hcd\n", __func__);
2620	usb_put_hcd(hcd);
2621	DBG(0, "%s: Done\n", __func__);
2622
2623	return 0;
2624}
2625
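/*
 * The probe below expects three platform resources -- IORESOURCE_MEM 0
 * (data port), IORESOURCE_MEM 1 (address/command port) and
 * IORESOURCE_IRQ 0 -- plus a struct isp1362_platform_data (see
 * include/linux/usb/isp1362.h).  A minimal board-file sketch, with
 * hypothetical names, addresses, IRQ line and field values, showing
 * only fields this driver actually reads:
 *
 *	static struct resource my_isp1362_resources[] = {
 *		[0] = DEFINE_RES_MEM(0x10000000, 2),	// data register
 *		[1] = DEFINE_RES_MEM(0x10000002, 2),	// address register
 *		[2] = {					// interrupt line
 *			.start	= 42,
 *			.end	= 42,
 *			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 *		},
 *	};
 *
 *	static struct isp1362_platform_data my_isp1362_pdata = {
 *		.sel15Kres		= 1,
 *		.oc_enable		= 1,
 *		.no_power_switching	= 1,
 *		// .delay/.reset/.clock hooks go here if the board needs
 *		// them (a delay hook is mandatory with USE_PLATFORM_DELAY)
 *	};
 *
 *	static struct platform_device my_isp1362_device = {
 *		.name		= "isp1362-hcd",  // must match hcd_name below
 *		.id		= -1,
 *		.num_resources	= ARRAY_SIZE(my_isp1362_resources),
 *		.resource	= my_isp1362_resources,
 *		.dev		= {
 *			.platform_data = &my_isp1362_pdata,
 *		},
 *	};
 */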
2626static int isp1362_probe(struct platform_device *pdev)
2627{
2628	struct usb_hcd *hcd;
2629	struct isp1362_hcd *isp1362_hcd;
2630	struct resource *addr, *data, *irq_res;
2631	void __iomem *addr_reg;
2632	void __iomem *data_reg;
2633	int irq;
2634	int retval = 0;
2635	unsigned int irq_flags = 0;
2636
2637	if (usb_disabled())
2638		return -ENODEV;
2639
2640	/* basic sanity checks first.  board-specific init logic should
2641	 * have initialized these three resources and probably board
2642	 * specific platform_data.  we don't probe for IRQs, and do only
2643	 * minimal sanity checking.
2644	 */
2645	if (pdev->num_resources < 3)
2646		return -ENODEV;
2647
2648	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2649	if (!irq_res)
2650		return -ENODEV;
2651
2652	irq = irq_res->start;
2653
2654	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2655	addr_reg = devm_ioremap_resource(&pdev->dev, addr);
2656	if (IS_ERR(addr_reg))
2657		return PTR_ERR(addr_reg);
2658
2659	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2660	data_reg = devm_ioremap_resource(&pdev->dev, data);
2661	if (IS_ERR(data_reg))
2662		return PTR_ERR(data_reg);
2663
2664	/* allocate and initialize hcd */
2665	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2666	if (!hcd)
2667		return -ENOMEM;
2668
2669	hcd->rsrc_start = data->start;
2670	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2671	isp1362_hcd->data_reg = data_reg;
2672	isp1362_hcd->addr_reg = addr_reg;
2673
2674	isp1362_hcd->next_statechange = jiffies;
2675	spin_lock_init(&isp1362_hcd->lock);
2676	INIT_LIST_HEAD(&isp1362_hcd->async);
2677	INIT_LIST_HEAD(&isp1362_hcd->periodic);
2678	INIT_LIST_HEAD(&isp1362_hcd->isoc);
2679	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2680	isp1362_hcd->board = dev_get_platdata(&pdev->dev);
2681#if USE_PLATFORM_DELAY
2682	if (!isp1362_hcd->board->delay) {
2683		dev_err(hcd->self.controller, "No platform delay function given\n");
2684		retval = -ENODEV;
2685		goto err;
2686	}
2687#endif
2688
2689	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2690		irq_flags |= IRQF_TRIGGER_RISING;
2691	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2692		irq_flags |= IRQF_TRIGGER_FALLING;
2693	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2694		irq_flags |= IRQF_TRIGGER_HIGH;
2695	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2696		irq_flags |= IRQF_TRIGGER_LOW;
2697
2698	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
2699	if (retval != 0)
2700		goto err;
2701	device_wakeup_enable(hcd->self.controller);
2702
2703	dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);
2704
2705	create_debug_file(isp1362_hcd);
2706
2707	return 0;
2708
2709 err:
2710	usb_put_hcd(hcd);
2711
2712	return retval;
2713}
2714
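/*
 * Legacy platform PM callbacks: on PM_EVENT_FREEZE the root hub is
 * suspended via isp1362_bus_suspend(), otherwise port power is simply
 * switched off (RH_HS_LPS); resume restores whichever path was taken.
 */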
2715#ifdef	CONFIG_PM
2716static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2717{
2718	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2719	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2720	unsigned long flags;
2721	int retval = 0;
2722
2723	DBG(0, "%s: Suspending device\n", __func__);
2724
2725	if (state.event == PM_EVENT_FREEZE) {
2726		DBG(0, "%s: Suspending root hub\n", __func__);
2727		retval = isp1362_bus_suspend(hcd);
2728	} else {
2729		DBG(0, "%s: Suspending RH ports\n", __func__);
2730		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2731		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2732		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2733	}
2734	if (retval == 0)
2735		pdev->dev.power.power_state = state;
2736	return retval;
2737}
2738
2739static int isp1362_resume(struct platform_device *pdev)
2740{
2741	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2742	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2743	unsigned long flags;
2744
2745	DBG(0, "%s: Resuming\n", __func__);
2746
2747	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2748		DBG(0, "%s: Resume RH ports\n", __func__);
2749		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2750		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2751		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2752		return 0;
2753	}
2754
2755	pdev->dev.power.power_state = PMSG_ON;
2756
2757	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2758}
2759#else
2760#define	isp1362_suspend	NULL
2761#define	isp1362_resume	NULL
2762#endif
2763
2764static struct platform_driver isp1362_driver = {
2765	.probe = isp1362_probe,
2766	.remove = isp1362_remove,
2767
2768	.suspend = isp1362_suspend,
2769	.resume = isp1362_resume,
2770	.driver = {
2771		.name = hcd_name,
2772	},
2773};
2774
2775module_platform_driver(isp1362_driver);