   1/*
   2 * ISP1362 HCD (Host Controller Driver) for USB.
   3 *
   4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
   5 *
   6 * Derived from the SL811 HCD, rewritten for ISP116x.
   7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
   8 *
   9 * Portions:
  10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
  11 * Copyright (C) 2004 David Brownell
  12 */
  13
  14/*
   15 * The ISP1362 chip requires large delays (300ns and 462ns) between
   16 * accesses to the address and data registers.
  17 * The following timing options exist:
  18 *
  19 * 1. Configure your memory controller to add such delays if it can (the best)
   20 * 2. Implement a platform-specific delay function, possibly
  21 *    combined with configuring the memory controller; see
   22 *    include/linux/usb/isp1362.h for more info.
  23 * 3. Use ndelay (easiest, poorest).
  24 *
  25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
   26 * platform-specific section of isp1362.h to select the appropriate variant.
  27 *
  28 * Also note that according to the Philips "ISP1362 Errata" document
   29 * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
  30 * is reasserted (even with #CS deasserted) within 132ns after a
  31 * write cycle to any controller register. If the hardware doesn't
  32 * implement the recommended fix (gating the #WR with #CS) software
  33 * must ensure that no further write cycle (not necessarily to the chip!)
  34 * is issued by the CPU within this interval.
   35 *
  36 * For PXA25x this can be ensured by using VLIO with the maximum
  37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
  38 */
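/*
 * Illustrative sketch only (not part of this driver): option 2 above is
 * normally wired up in the board support code through the ->delay() hook
 * of struct isp1362_platform_data. The hook name and signature used below
 * are assumptions; check include/linux/usb/isp1362.h for the kernel in use.
 *
 *	static void board_isp1362_delay(struct device *dev, unsigned int ns)
 *	{
 *		ndelay(ns);	// or add bus-specific recovery cycles here
 *	}
 *
 *	static struct isp1362_platform_data board_isp1362_data = {
 *		.delay	= board_isp1362_delay,
 *		// remaining fields as required by the board
 *	};
 */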
  39
   40#undef ISP1362_DEBUG
  41
  42/*
  43 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
  44 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
  45 * requests are carried out in separate frames. This will delay any SETUP
  46 * packets until the start of the next frame so that this situation is
  47 * unlikely to occur (and makes usbtest happy running with a PXA255 target
  48 * device).
  49 */
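/*
 * When enabled, the deferral is implemented in start_atl_transfers() below:
 * after a SETUP PTD has been queued, the ATL buffer is activated from the
 * next SOF interrupt instead of immediately.
 */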
  50#undef BUGGY_PXA2XX_UDC_USBTEST
  51
  52#undef PTD_TRACE
  53#undef URB_TRACE
  54#undef VERBOSE
  55#undef REGISTERS
  56
  57/* This enables a memory test on the ISP1362 chip memory to make sure the
  58 * chip access timing is correct.
  59 */
  60#undef CHIP_BUFFER_TEST
  61
  62#include <linux/module.h>
  63#include <linux/moduleparam.h>
  64#include <linux/kernel.h>
  65#include <linux/delay.h>
  66#include <linux/ioport.h>
  67#include <linux/sched.h>
  68#include <linux/slab.h>
   69#include <linux/errno.h>
  70#include <linux/list.h>
  71#include <linux/interrupt.h>
  72#include <linux/usb.h>
  73#include <linux/usb/isp1362.h>
  74#include <linux/usb/hcd.h>
  75#include <linux/platform_device.h>
  76#include <linux/pm.h>
  77#include <linux/io.h>
  78#include <linux/bitmap.h>
  79#include <linux/prefetch.h>
  80#include <linux/debugfs.h>
  81#include <linux/seq_file.h>
  82
  83#include <asm/irq.h>
  84#include <asm/byteorder.h>
  85#include <asm/unaligned.h>
  86
  87static int dbg_level;
  88#ifdef ISP1362_DEBUG
  89module_param(dbg_level, int, 0644);
  90#else
   91module_param(dbg_level, int, 0);
  92#endif
  93
  94#include "../core/usb.h"
  95#include "isp1362.h"
  96
  97
  98#define DRIVER_VERSION	"2005-04-04"
  99#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"
 100
 101MODULE_DESCRIPTION(DRIVER_DESC);
 102MODULE_LICENSE("GPL");
 103
 104static const char hcd_name[] = "isp1362-hcd";
 105
 106static void isp1362_hc_stop(struct usb_hcd *hcd);
 107static int isp1362_hc_start(struct usb_hcd *hcd);
 108
 109/*-------------------------------------------------------------------------*/
 110
 111/*
  112 * When called from the interrupt handler, only isp1362_hcd->irqenb is modified,
 113 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 114 * completion.
 115 * We don't need a 'disable' counterpart, since interrupts will be disabled
 116 * only by the interrupt handler.
 117 */
 118static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
 119{
 120	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
 121		return;
 122	if (mask & ~isp1362_hcd->irqenb)
 123		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
 124	isp1362_hcd->irqenb |= mask;
 125	if (isp1362_hcd->irq_active)
 126		return;
 127	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
 128}
 129
 130/*-------------------------------------------------------------------------*/
 131
 132static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
 133						     u16 offset)
 134{
 135	struct isp1362_ep_queue *epq = NULL;
 136
 137	if (offset < isp1362_hcd->istl_queue[1].buf_start)
 138		epq = &isp1362_hcd->istl_queue[0];
 139	else if (offset < isp1362_hcd->intl_queue.buf_start)
 140		epq = &isp1362_hcd->istl_queue[1];
 141	else if (offset < isp1362_hcd->atl_queue.buf_start)
 142		epq = &isp1362_hcd->intl_queue;
 143	else if (offset < isp1362_hcd->atl_queue.buf_start +
 144		   isp1362_hcd->atl_queue.buf_size)
 145		epq = &isp1362_hcd->atl_queue;
 146
 147	if (epq)
 148		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
 149	else
 150		pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
 151
 152	return epq;
 153}
 154
 155static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 156{
 157	int offset;
 158
 159	if (index * epq->blk_size > epq->buf_size) {
 160		pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
 161		     epq->buf_size / epq->blk_size);
 162		return -EINVAL;
 163	}
 164	offset = epq->buf_start + index * epq->blk_size;
 165	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
 166
 167	return offset;
 168}
 169
 170/*-------------------------------------------------------------------------*/
 171
 172static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
 173				    int mps)
 174{
 175	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
 176
 177	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
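	/* If the whole request does not fit, trim the transfer down to a
	 * multiple of the max packet size so the remainder can be
	 * continued cleanly.
	 */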
 178	if (xfer_size < size && xfer_size % mps)
 179		xfer_size -= xfer_size % mps;
 180
 181	return xfer_size;
 182}
 183
 184static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
 185			     struct isp1362_ep *ep, u16 len)
 186{
 187	int ptd_offset = -EINVAL;
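	/* blocks needed for the PTD header plus payload, rounded up to whole blk_size blocks */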
 188	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
 189	int found;
 190
 191	BUG_ON(len > epq->buf_size);
 192
 193	if (!epq->buf_avail)
 194		return -ENOMEM;
 195
 196	if (ep->num_ptds)
 197		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
 198		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
 199	BUG_ON(ep->num_ptds != 0);
 200
 201	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
 202						num_ptds, 0);
 203	if (found >= epq->buf_count)
 204		return -EOVERFLOW;
 205
 206	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
 207	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
 208	ptd_offset = get_ptd_offset(epq, found);
 209	WARN_ON(ptd_offset < 0);
 210	ep->ptd_offset = ptd_offset;
 211	ep->num_ptds += num_ptds;
 212	epq->buf_avail -= num_ptds;
 213	BUG_ON(epq->buf_avail > epq->buf_count);
 214	ep->ptd_index = found;
 215	bitmap_set(&epq->buf_map, found, num_ptds);
 216	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
 217	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
 218	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
 219
 220	return found;
 221}
 222
 223static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 224{
 225	int last = ep->ptd_index + ep->num_ptds;
 226
 227	if (last > epq->buf_count)
 228		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
 229		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
 230		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
 231		    epq->buf_map, epq->skip_map);
 232	BUG_ON(last > epq->buf_count);
 233
 234	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
 235	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
 236	epq->buf_avail += ep->num_ptds;
 237	epq->ptd_count--;
 238
 239	BUG_ON(epq->buf_avail > epq->buf_count);
 240	BUG_ON(epq->ptd_count > epq->buf_count);
 241
 242	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
 243	    __func__, epq->name,
 244	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
 245	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
 246	    epq->buf_map, epq->skip_map);
 247
 248	ep->num_ptds = 0;
 249	ep->ptd_offset = -EINVAL;
 250	ep->ptd_index = -EINVAL;
 251}
 252
 253/*-------------------------------------------------------------------------*/
 254
 255/*
  256  Set up PTDs.
 257*/
 258static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 259			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
 260			u16 fno)
 261{
 262	struct ptd *ptd;
 263	int toggle;
 264	int dir;
 265	u16 len;
 266	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
 267
 268	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
 269
 270	ptd = &ep->ptd;
 271
 272	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
 273
 274	switch (ep->nextpid) {
 275	case USB_PID_IN:
 276		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
 277		dir = PTD_DIR_IN;
 278		if (usb_pipecontrol(urb->pipe)) {
 279			len = min_t(size_t, ep->maxpacket, buf_len);
 280		} else if (usb_pipeisoc(urb->pipe)) {
 281			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
 282			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
 283		} else
 284			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 285		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 286		    (int)buf_len);
 287		break;
 288	case USB_PID_OUT:
 289		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
 290		dir = PTD_DIR_OUT;
 291		if (usb_pipecontrol(urb->pipe))
 292			len = min_t(size_t, ep->maxpacket, buf_len);
 293		else if (usb_pipeisoc(urb->pipe))
 294			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
 295		else
 296			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 297		if (len == 0)
 298			pr_info("%s: Sending ZERO packet: %d\n", __func__,
 299			     urb->transfer_flags & URB_ZERO_PACKET);
 300		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 301		    (int)buf_len);
 302		break;
 303	case USB_PID_SETUP:
 304		toggle = 0;
 305		dir = PTD_DIR_SETUP;
 306		len = sizeof(struct usb_ctrlrequest);
 307		DBG(1, "%s: SETUP len %d\n", __func__, len);
 308		ep->data = urb->setup_packet;
 309		break;
 310	case USB_PID_ACK:
 311		toggle = 1;
 312		len = 0;
 313		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
 314			PTD_DIR_OUT : PTD_DIR_IN;
 315		DBG(1, "%s: ACK   len %d\n", __func__, len);
 316		break;
 317	default:
 318		toggle = dir = len = 0;
 319		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
 320		BUG_ON(1);
 321	}
 322
 323	ep->length = len;
 324	if (!len)
 325		ep->data = NULL;
 326
 327	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
 328	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
 329		PTD_EP(ep->epnum);
 330	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
 331	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
 332
 333	if (usb_pipeint(urb->pipe)) {
 334		ptd->faddr |= PTD_SF_INT(ep->branch);
 335		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
 336	}
 337	if (usb_pipeisoc(urb->pipe))
 338		ptd->faddr |= PTD_SF_ISO(fno);
 339
 340	DBG(1, "%s: Finished\n", __func__);
 341}
 342
 343static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 344			      struct isp1362_ep_queue *epq)
 345{
 346	struct ptd *ptd = &ep->ptd;
 347	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
  348
 349	prefetch(ptd);
 350	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 351	if (len)
 352		isp1362_write_buffer(isp1362_hcd, ep->data,
 353				     ep->ptd_offset + PTD_HEADER_SIZE, len);
 354
 355	dump_ptd(ptd);
 356	dump_ptd_out_data(ptd, ep->data);
 357}
 358
 359static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 360			     struct isp1362_ep_queue *epq)
 361{
 362	struct ptd *ptd = &ep->ptd;
 363	int act_len;
 364
 365	WARN_ON(list_empty(&ep->active));
 366	BUG_ON(ep->ptd_offset < 0);
 367
 368	list_del_init(&ep->active);
 369	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
 370
 371	prefetchw(ptd);
 372	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 373	dump_ptd(ptd);
 374	act_len = PTD_GET_COUNT(ptd);
 375	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
 376		return;
 377	if (act_len > ep->length)
 378		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
 379			 ep->ptd_offset, act_len, ep->length);
 380	BUG_ON(act_len > ep->length);
 381	/* Only transfer the amount of data that has actually been overwritten
 382	 * in the chip buffer. We don't want any data that doesn't belong to the
  383	 * transfer to leak out of the chip to the caller's transfer buffer!
 384	 */
 385	prefetchw(ep->data);
 386	isp1362_read_buffer(isp1362_hcd, ep->data,
 387			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
 388	dump_ptd_in_data(ptd, ep->data);
 389}
 390
 391/*
 392 * INT PTDs will stay in the chip until data is available.
 393 * This function will remove a PTD from the chip when the URB is dequeued.
 394 * Must be called with the spinlock held and IRQs disabled
 395 */
 396static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 397
 398{
 399	int index;
 400	struct isp1362_ep_queue *epq;
 401
 402	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
 403	BUG_ON(ep->ptd_offset < 0);
 404
 405	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 406	BUG_ON(!epq);
 407
 408	/* put ep in remove_list for cleanup */
 409	WARN_ON(!list_empty(&ep->remove_list));
 410	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
 411	/* let SOF interrupt handle the cleanup */
 412	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 413
 414	index = ep->ptd_index;
 415	if (index < 0)
 416		/* ISO queues don't have SKIP registers */
 417		return;
 418
 419	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
 420	    index, ep->ptd_offset, epq->skip_map, 1 << index);
 421
 422	/* prevent further processing of PTD (will be effective after next SOF) */
 423	epq->skip_map |= 1 << index;
 424	if (epq == &isp1362_hcd->atl_queue) {
 425		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
 426		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
 427		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
 428		if (~epq->skip_map == 0)
 429			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 430	} else if (epq == &isp1362_hcd->intl_queue) {
 431		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
 432		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
 433		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
 434		if (~epq->skip_map == 0)
 435			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 436	}
 437}
 438
 439/*
  440  Take done or failed requests out of the schedule. Give back
 441  processed urbs.
 442*/
 443static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 444			   struct urb *urb, int status)
 445     __releases(isp1362_hcd->lock)
 446     __acquires(isp1362_hcd->lock)
 447{
 448	urb->hcpriv = NULL;
 449	ep->error_count = 0;
 450
 451	if (usb_pipecontrol(urb->pipe))
 452		ep->nextpid = USB_PID_SETUP;
 453
 454	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
 455		ep->num_req, usb_pipedevice(urb->pipe),
 456		usb_pipeendpoint(urb->pipe),
 457		!usb_pipein(urb->pipe) ? "out" : "in",
 458		usb_pipecontrol(urb->pipe) ? "ctrl" :
 459			usb_pipeint(urb->pipe) ? "int" :
 460			usb_pipebulk(urb->pipe) ? "bulk" :
 461			"iso",
 462		urb->actual_length, urb->transfer_buffer_length,
 463		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
 464		"short_ok" : "", urb->status);
 465
 466
 467	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
 468	spin_unlock(&isp1362_hcd->lock);
 469	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
 470	spin_lock(&isp1362_hcd->lock);
 471
 472	/* take idle endpoints out of the schedule right away */
 473	if (!list_empty(&ep->hep->urb_list))
 474		return;
 475
 476	/* async deschedule */
 477	if (!list_empty(&ep->schedule)) {
 478		list_del_init(&ep->schedule);
 479		return;
 480	}
 481
 482
 483	if (ep->interval) {
 484		/* periodic deschedule */
 485		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
 486		    ep, ep->branch, ep->load,
 487		    isp1362_hcd->load[ep->branch],
 488		    isp1362_hcd->load[ep->branch] - ep->load);
 489		isp1362_hcd->load[ep->branch] -= ep->load;
 490		ep->branch = PERIODIC_SIZE;
 491	}
 492}
 493
 494/*
 495 * Analyze transfer results, handle partial transfers and errors
  496 */
 497static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 498{
 499	struct urb *urb = get_urb(ep);
 500	struct usb_device *udev;
 501	struct ptd *ptd;
 502	int short_ok;
 503	u16 len;
 504	int urbstat = -EINPROGRESS;
 505	u8 cc;
 506
 507	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
 508
 509	udev = urb->dev;
 510	ptd = &ep->ptd;
 511	cc = PTD_GET_CC(ptd);
 512	if (cc == PTD_NOTACCESSED) {
 513		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
 514		    ep->num_req, ptd);
 515		cc = PTD_DEVNOTRESP;
 516	}
 517
 518	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
 519	len = urb->transfer_buffer_length - urb->actual_length;
 520
  521	/* Data underrun is special. For an allowed underrun
  522	   we clear the error and continue as normal. For a
  523	   forbidden underrun we finish the DATA stage
  524	   immediately, except for control transfers, where
  525	   we still run the STATUS stage.
  526	*/
 527	if (cc == PTD_DATAUNDERRUN) {
 528		if (short_ok) {
 529			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
 530			    __func__, ep->num_req, short_ok ? "" : "not_",
 531			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 532			cc = PTD_CC_NOERROR;
 533			urbstat = 0;
 534		} else {
 535			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
 536			    __func__, ep->num_req,
 537			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
 538			    short_ok ? "" : "not_",
 539			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 540			/* save the data underrun error code for later and
 541			 * proceed with the status stage
 542			 */
 543			urb->actual_length += PTD_GET_COUNT(ptd);
 544			if (usb_pipecontrol(urb->pipe)) {
  545				ep->nextpid = USB_PID_ACK;
 546				BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 547
 548				if (urb->status == -EINPROGRESS)
 549					urb->status = cc_to_error[PTD_DATAUNDERRUN];
 550			} else {
 551				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
 552					      PTD_GET_TOGGLE(ptd));
 553				urbstat = cc_to_error[PTD_DATAUNDERRUN];
 554			}
 555			goto out;
 556		}
 557	}
 558
 559	if (cc != PTD_CC_NOERROR) {
 560		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
 561			urbstat = cc_to_error[cc];
 562			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
 563			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
 564			    ep->error_count);
 565		}
 566		goto out;
 567	}
 568
 569	switch (ep->nextpid) {
 570	case USB_PID_OUT:
 571		if (PTD_GET_COUNT(ptd) != ep->length)
 572			pr_err("%s: count=%d len=%d\n", __func__,
 573			   PTD_GET_COUNT(ptd), ep->length);
 574		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
 575		urb->actual_length += ep->length;
 576		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 577		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
 578		if (urb->actual_length == urb->transfer_buffer_length) {
 579			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 580			    ep->num_req, len, ep->maxpacket, urbstat);
 581			if (usb_pipecontrol(urb->pipe)) {
 582				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 583				    ep->num_req,
 584				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 585				ep->nextpid = USB_PID_ACK;
 586			} else {
 587				if (len % ep->maxpacket ||
 588				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
 589					urbstat = 0;
 590					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 591					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 592					    urbstat, len, ep->maxpacket, urb->actual_length);
 593				}
 594			}
 595		}
 596		break;
 597	case USB_PID_IN:
 598		len = PTD_GET_COUNT(ptd);
 599		BUG_ON(len > ep->length);
 600		urb->actual_length += len;
 601		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 602		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
 603		/* if transfer completed or (allowed) data underrun */
 604		if ((urb->transfer_buffer_length == urb->actual_length) ||
 605		    len % ep->maxpacket) {
 606			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 607			    ep->num_req, len, ep->maxpacket, urbstat);
 608			if (usb_pipecontrol(urb->pipe)) {
 609				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 610				    ep->num_req,
 611				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 612				ep->nextpid = USB_PID_ACK;
 613			} else {
 614				urbstat = 0;
 615				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 616				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 617				    urbstat, len, ep->maxpacket, urb->actual_length);
 618			}
 619		}
 620		break;
 621	case USB_PID_SETUP:
 622		if (urb->transfer_buffer_length == urb->actual_length) {
 623			ep->nextpid = USB_PID_ACK;
 624		} else if (usb_pipeout(urb->pipe)) {
 625			usb_settoggle(udev, 0, 1, 1);
 626			ep->nextpid = USB_PID_OUT;
 627		} else {
 628			usb_settoggle(udev, 0, 0, 1);
 629			ep->nextpid = USB_PID_IN;
 630		}
 631		break;
 632	case USB_PID_ACK:
 633		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
 634		    urbstat);
 635		WARN_ON(urbstat != -EINPROGRESS);
 636		urbstat = 0;
 637		ep->nextpid = 0;
 638		break;
 639	default:
 640		BUG_ON(1);
 641	}
 642
 643 out:
 644	if (urbstat != -EINPROGRESS) {
 645		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
 646		    ep, ep->num_req, urb, urbstat);
 647		finish_request(isp1362_hcd, ep, urb, urbstat);
 648	}
 649}
 650
 651static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
 652{
 653	struct isp1362_ep *ep;
 654	struct isp1362_ep *tmp;
 655
 656	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
 657		struct isp1362_ep_queue *epq =
 658			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 659		int index = ep->ptd_index;
 660
 661		BUG_ON(epq == NULL);
 662		if (index >= 0) {
 663			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
 664			BUG_ON(ep->num_ptds == 0);
 665			release_ptd_buffers(epq, ep);
 666		}
 667		if (!list_empty(&ep->hep->urb_list)) {
 668			struct urb *urb = get_urb(ep);
 669
 670			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
 671			    ep->num_req, ep);
 672			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
 673		}
 674		WARN_ON(list_empty(&ep->active));
 675		if (!list_empty(&ep->active)) {
 676			list_del_init(&ep->active);
 677			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
 678		}
 679		list_del_init(&ep->remove_list);
 680		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
 681	}
 682	DBG(1, "%s: Done\n", __func__);
 683}
 684
 685static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
 686{
 687	if (count > 0) {
 688		if (count < isp1362_hcd->atl_queue.ptd_count)
 689			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
 690		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
 691		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
 692		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 693	} else
 694		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 695}
 696
 697static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 698{
 699	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
 700	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 701	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
 702}
 703
 704static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
 705{
 706	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
 707	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
 708			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
 709}
 710
 711static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 712		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
 713{
 714	int index = epq->free_ptd;
 715
 716	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
 717	index = claim_ptd_buffers(epq, ep, ep->length);
 718	if (index == -ENOMEM) {
 719		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
 720		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
 721		return index;
 722	} else if (index == -EOVERFLOW) {
 723		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
 724		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
 725		    epq->buf_map, epq->skip_map);
 726		return index;
 727	} else
 728		BUG_ON(index < 0);
 729	list_add_tail(&ep->active, &epq->active);
 730	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
 731	    ep, ep->num_req, ep->length, &epq->active);
 732	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
 733	    ep->ptd_offset, ep, ep->num_req);
 734	isp1362_write_ptd(isp1362_hcd, ep, epq);
 735	__clear_bit(ep->ptd_index, &epq->skip_map);
 736
 737	return 0;
 738}
 739
 740static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
 741{
 742	int ptd_count = 0;
 743	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
 744	struct isp1362_ep *ep;
 745	int defer = 0;
 746
 747	if (atomic_read(&epq->finishing)) {
 748		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 749		return;
 750	}
 751
 752	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
 753		struct urb *urb = get_urb(ep);
 754		int ret;
 755
 756		if (!list_empty(&ep->active)) {
 757			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
 758			continue;
 759		}
 760
 761		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
 762		    ep, ep->num_req);
 763
 764		ret = submit_req(isp1362_hcd, urb, ep, epq);
 765		if (ret == -ENOMEM) {
 766			defer = 1;
 767			break;
 768		} else if (ret == -EOVERFLOW) {
 769			defer = 1;
 770			continue;
 771		}
 772#ifdef BUGGY_PXA2XX_UDC_USBTEST
 773		defer = ep->nextpid == USB_PID_SETUP;
 774#endif
 775		ptd_count++;
 776	}
 777
 778	/* Avoid starving of endpoints */
 779	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
 780		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
 781		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
 782	}
 783	if (ptd_count || defer)
 784		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
 785
 786	epq->ptd_count += ptd_count;
 787	if (epq->ptd_count > epq->stat_maxptds) {
 788		epq->stat_maxptds = epq->ptd_count;
 789		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
 790	}
 791}
 792
 793static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 794{
 795	int ptd_count = 0;
 796	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
 797	struct isp1362_ep *ep;
 798
 799	if (atomic_read(&epq->finishing)) {
 800		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 801		return;
 802	}
 803
 804	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
 805		struct urb *urb = get_urb(ep);
 806		int ret;
 807
 808		if (!list_empty(&ep->active)) {
 809			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
 810			    epq->name, ep);
 811			continue;
 812		}
 813
 814		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
 815		    epq->name, ep, ep->num_req);
 816		ret = submit_req(isp1362_hcd, urb, ep, epq);
 817		if (ret == -ENOMEM)
 818			break;
 819		else if (ret == -EOVERFLOW)
 820			continue;
 821		ptd_count++;
 822	}
 823
 824	if (ptd_count) {
 825		static int last_count;
 826
 827		if (ptd_count != last_count) {
 828			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
 829			last_count = ptd_count;
 830		}
 831		enable_intl_transfers(isp1362_hcd);
 832	}
 833
 834	epq->ptd_count += ptd_count;
 835	if (epq->ptd_count > epq->stat_maxptds)
 836		epq->stat_maxptds = epq->ptd_count;
 837}
 838
 839static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 840{
 841	u16 ptd_offset = ep->ptd_offset;
 842	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
 843
 844	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
 845	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
 846
 847	ptd_offset += num_ptds * epq->blk_size;
 848	if (ptd_offset < epq->buf_start + epq->buf_size)
 849		return ptd_offset;
 850	else
 851		return -ENOMEM;
 852}
 853
 854static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
 855{
 856	int ptd_count = 0;
 857	int flip = isp1362_hcd->istl_flip;
 858	struct isp1362_ep_queue *epq;
 859	int ptd_offset;
 860	struct isp1362_ep *ep;
 861	struct isp1362_ep *tmp;
 862	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
 863
 864 fill2:
 865	epq = &isp1362_hcd->istl_queue[flip];
 866	if (atomic_read(&epq->finishing)) {
 867		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 868		return;
 869	}
 870
 871	if (!list_empty(&epq->active))
 872		return;
 873
 874	ptd_offset = epq->buf_start;
 875	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
 876		struct urb *urb = get_urb(ep);
 877		s16 diff = fno - (u16)urb->start_frame;
 878
 879		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
 880
 881		if (diff > urb->number_of_packets) {
 882			/* time frame for this URB has elapsed */
 883			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
 884			continue;
 885		} else if (diff < -1) {
 886			/* URB is not due in this frame or the next one.
 887			 * Comparing with '-1' instead of '0' accounts for double
 888			 * buffering in the ISP1362 which enables us to queue the PTD
 889			 * one frame ahead of time
 890			 */
 891		} else if (diff == -1) {
 892			/* submit PTD's that are due in the next frame */
 893			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
 894			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
 895			    epq->buf_start + epq->buf_size) {
 896				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
 897				    __func__, ep->length);
 898				continue;
 899			}
 900			ep->ptd_offset = ptd_offset;
 901			list_add_tail(&ep->active, &epq->active);
 902
 903			ptd_offset = next_ptd(epq, ep);
 904			if (ptd_offset < 0) {
 905				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
 906				     ep->num_req, epq->name);
 907				break;
 908			}
 909		}
 910	}
 911	list_for_each_entry(ep, &epq->active, active) {
 912		if (epq->active.next == &ep->active)
 913			ep->ptd.mps |= PTD_LAST_MSK;
 914		isp1362_write_ptd(isp1362_hcd, ep, epq);
 915		ptd_count++;
 916	}
 917
 918	if (ptd_count)
 919		enable_istl_transfers(isp1362_hcd, flip);
 920
 921	epq->ptd_count += ptd_count;
 922	if (epq->ptd_count > epq->stat_maxptds)
 923		epq->stat_maxptds = epq->ptd_count;
 924
  925	/* check whether the second ISTL buffer may also be filled */
 926	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
 927	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
 928		fno++;
 929		ptd_count = 0;
 930		flip = 1 - flip;
 931		goto fill2;
 932	}
 933}
 934
 935static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
 936			     struct isp1362_ep_queue *epq)
 937{
 938	struct isp1362_ep *ep;
 939	struct isp1362_ep *tmp;
 940
 941	if (list_empty(&epq->active)) {
 942		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 943		return;
 944	}
 945
 946	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
 947
 948	atomic_inc(&epq->finishing);
 949	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
 950		int index = ep->ptd_index;
 951
 952		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
 953		    index, ep->ptd_offset);
 954
 955		BUG_ON(index < 0);
 956		if (__test_and_clear_bit(index, &done_map)) {
 957			isp1362_read_ptd(isp1362_hcd, ep, epq);
 958			epq->free_ptd = index;
 959			BUG_ON(ep->num_ptds == 0);
 960			release_ptd_buffers(epq, ep);
 961
 962			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
 963			    ep, ep->num_req);
 964			if (!list_empty(&ep->remove_list)) {
 965				list_del_init(&ep->remove_list);
 966				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
 967			}
 968			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
 969			    ep, ep->num_req);
 970			postproc_ep(isp1362_hcd, ep);
 971		}
 972		if (!done_map)
 973			break;
 974	}
 975	if (done_map)
 976		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
 977		     epq->skip_map);
 978	atomic_dec(&epq->finishing);
 979}
 980
 981static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
 982{
 983	struct isp1362_ep *ep;
 984	struct isp1362_ep *tmp;
 985
 986	if (list_empty(&epq->active)) {
 987		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 988		return;
 989	}
 990
 991	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
 992
 993	atomic_inc(&epq->finishing);
 994	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
 995		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
 996
 997		isp1362_read_ptd(isp1362_hcd, ep, epq);
 998		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
 999		postproc_ep(isp1362_hcd, ep);
1000	}
1001	WARN_ON(epq->blk_size != 0);
1002	atomic_dec(&epq->finishing);
1003}
1004
1005static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1006{
1007	int handled = 0;
1008	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1009	u16 irqstat;
1010	u16 svc_mask;
1011
1012	spin_lock(&isp1362_hcd->lock);
1013
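	/*
	 * Mark the handler active (and catch unexpected re-entry): while
	 * irq_active is set, isp1362_enable_int() does not touch HCuPINTENB;
	 * the accumulated irqenb mask is written back at the end of this
	 * handler.
	 */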
1014	BUG_ON(isp1362_hcd->irq_active++);
1015
1016	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1017
1018	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1019	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1020
1021	/* only handle interrupts that are currently enabled */
1022	irqstat &= isp1362_hcd->irqenb;
1023	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1024	svc_mask = irqstat;
1025
1026	if (irqstat & HCuPINT_SOF) {
1027		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1028		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1029		handled = 1;
1030		svc_mask &= ~HCuPINT_SOF;
1031		DBG(3, "%s: SOF\n", __func__);
1032		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1033		if (!list_empty(&isp1362_hcd->remove_list))
1034			finish_unlinks(isp1362_hcd);
1035		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1036			if (list_empty(&isp1362_hcd->atl_queue.active)) {
1037				start_atl_transfers(isp1362_hcd);
1038			} else {
1039				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1040				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1041						    isp1362_hcd->atl_queue.skip_map);
1042				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1043			}
1044		}
1045	}
1046
1047	if (irqstat & HCuPINT_ISTL0) {
1048		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1049		handled = 1;
1050		svc_mask &= ~HCuPINT_ISTL0;
1051		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1052		DBG(1, "%s: ISTL0\n", __func__);
1053		WARN_ON((int)!!isp1362_hcd->istl_flip);
1054		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1055			HCBUFSTAT_ISTL0_ACTIVE);
1056		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1057			HCBUFSTAT_ISTL0_DONE));
1058		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1059	}
1060
1061	if (irqstat & HCuPINT_ISTL1) {
1062		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1063		handled = 1;
1064		svc_mask &= ~HCuPINT_ISTL1;
1065		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1066		DBG(1, "%s: ISTL1\n", __func__);
1067		WARN_ON(!(int)isp1362_hcd->istl_flip);
1068		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1069			HCBUFSTAT_ISTL1_ACTIVE);
1070		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1071			HCBUFSTAT_ISTL1_DONE));
1072		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1073	}
1074
1075	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1076		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1077			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
1078		finish_iso_transfers(isp1362_hcd,
1079				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1080		start_iso_transfers(isp1362_hcd);
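		/* toggle so that the other ISTL buffer is serviced next time */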
1081		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1082	}
1083
1084	if (irqstat & HCuPINT_INTL) {
1085		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1086		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1087		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1088
1089		DBG(2, "%s: INTL\n", __func__);
1090
1091		svc_mask &= ~HCuPINT_INTL;
1092
1093		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1094		if (~(done_map | skip_map) == 0)
1095			/* All PTDs are finished, disable INTL processing entirely */
1096			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1097
1098		handled = 1;
1099		WARN_ON(!done_map);
1100		if (done_map) {
1101			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1102			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1103			start_intl_transfers(isp1362_hcd);
1104		}
1105	}
1106
1107	if (irqstat & HCuPINT_ATL) {
1108		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1109		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1110		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1111
1112		DBG(2, "%s: ATL\n", __func__);
1113
1114		svc_mask &= ~HCuPINT_ATL;
1115
1116		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1117		if (~(done_map | skip_map) == 0)
1118			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1119		if (done_map) {
1120			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1121			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1122			start_atl_transfers(isp1362_hcd);
1123		}
1124		handled = 1;
1125	}
1126
1127	if (irqstat & HCuPINT_OPR) {
1128		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1129		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1130
1131		svc_mask &= ~HCuPINT_OPR;
1132		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1133		intstat &= isp1362_hcd->intenb;
1134		if (intstat & OHCI_INTR_UE) {
1135			pr_err("Unrecoverable error\n");
1136			/* FIXME: do here reset or cleanup or whatever */
1137		}
1138		if (intstat & OHCI_INTR_RHSC) {
1139			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1140			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1141			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1142		}
1143		if (intstat & OHCI_INTR_RD) {
1144			pr_info("%s: RESUME DETECTED\n", __func__);
1145			isp1362_show_reg(isp1362_hcd, HCCONTROL);
1146			usb_hcd_resume_root_hub(hcd);
1147		}
1148		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1149		irqstat &= ~HCuPINT_OPR;
1150		handled = 1;
1151	}
1152
1153	if (irqstat & HCuPINT_SUSP) {
1154		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1155		handled = 1;
1156		svc_mask &= ~HCuPINT_SUSP;
1157
1158		pr_info("%s: SUSPEND IRQ\n", __func__);
1159	}
1160
1161	if (irqstat & HCuPINT_CLKRDY) {
1162		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1163		handled = 1;
1164		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1165		svc_mask &= ~HCuPINT_CLKRDY;
1166		pr_info("%s: CLKRDY IRQ\n", __func__);
1167	}
1168
1169	if (svc_mask)
1170		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1171
1172	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1173	isp1362_hcd->irq_active--;
1174	spin_unlock(&isp1362_hcd->lock);
1175
1176	return IRQ_RETVAL(handled);
1177}
1178
1179/*-------------------------------------------------------------------------*/
1180
1181#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
1182static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1183{
1184	int i, branch = -ENOSPC;
1185
1186	/* search for the least loaded schedule branch of that interval
1187	 * which has enough bandwidth left unreserved.
1188	 */
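	/* Example: with interval 8 the candidate branches are 0..7; branch i
	 * is usable only if none of the slots i, i+8, i+16, ... in load[]
	 * would exceed MAX_PERIODIC_LOAD with the new load added, and the
	 * least loaded usable branch is chosen.
	 */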
1189	for (i = 0; i < interval; i++) {
1190		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1191			int j;
1192
1193			for (j = i; j < PERIODIC_SIZE; j += interval) {
1194				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1195					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1196					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1197					break;
1198				}
1199			}
1200			if (j < PERIODIC_SIZE)
1201				continue;
1202			branch = i;
1203		}
1204	}
1205	return branch;
1206}
1207
1208/* NB! ALL the code above this point runs with isp1362_hcd->lock
1209   held, irqs off
1210*/
1211
1212/*-------------------------------------------------------------------------*/
1213
1214static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1215			       struct urb *urb,
1216			       gfp_t mem_flags)
1217{
1218	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1219	struct usb_device *udev = urb->dev;
1220	unsigned int pipe = urb->pipe;
1221	int is_out = !usb_pipein(pipe);
1222	int type = usb_pipetype(pipe);
1223	int epnum = usb_pipeendpoint(pipe);
1224	struct usb_host_endpoint *hep = urb->ep;
1225	struct isp1362_ep *ep = NULL;
1226	unsigned long flags;
1227	int retval = 0;
1228
1229	DBG(3, "%s: urb %p\n", __func__, urb);
1230
1231	if (type == PIPE_ISOCHRONOUS) {
1232		pr_err("Isochronous transfers not supported\n");
1233		return -ENOSPC;
1234	}
1235
1236	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1237		usb_pipedevice(pipe), epnum,
1238		is_out ? "out" : "in",
1239		usb_pipecontrol(pipe) ? "ctrl" :
1240			usb_pipeint(pipe) ? "int" :
1241			usb_pipebulk(pipe) ? "bulk" :
1242			"iso",
1243		urb->transfer_buffer_length,
1244		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1245		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1246		"short_ok" : "");
1247
1248	/* avoid all allocations within spinlocks: request or endpoint */
1249	if (!hep->hcpriv) {
1250		ep = kzalloc(sizeof *ep, mem_flags);
1251		if (!ep)
1252			return -ENOMEM;
1253	}
1254	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1255
1256	/* don't submit to a dead or disabled port */
1257	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1258	      USB_PORT_STAT_ENABLE) ||
1259	    !HC_IS_RUNNING(hcd->state)) {
1260		kfree(ep);
1261		retval = -ENODEV;
1262		goto fail_not_linked;
1263	}
1264
1265	retval = usb_hcd_link_urb_to_ep(hcd, urb);
1266	if (retval) {
1267		kfree(ep);
1268		goto fail_not_linked;
1269	}
1270
1271	if (hep->hcpriv) {
1272		ep = hep->hcpriv;
1273	} else {
1274		INIT_LIST_HEAD(&ep->schedule);
1275		INIT_LIST_HEAD(&ep->active);
1276		INIT_LIST_HEAD(&ep->remove_list);
1277		ep->udev = usb_get_dev(udev);
1278		ep->hep = hep;
1279		ep->epnum = epnum;
1280		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1281		ep->ptd_offset = -EINVAL;
1282		ep->ptd_index = -EINVAL;
1283		usb_settoggle(udev, epnum, is_out, 0);
1284
1285		if (type == PIPE_CONTROL)
1286			ep->nextpid = USB_PID_SETUP;
1287		else if (is_out)
1288			ep->nextpid = USB_PID_OUT;
1289		else
1290			ep->nextpid = USB_PID_IN;
1291
1292		switch (type) {
1293		case PIPE_ISOCHRONOUS:
1294		case PIPE_INTERRUPT:
1295			if (urb->interval > PERIODIC_SIZE)
1296				urb->interval = PERIODIC_SIZE;
1297			ep->interval = urb->interval;
1298			ep->branch = PERIODIC_SIZE;
1299			ep->load = usb_calc_bus_time(udev->speed, !is_out,
1300						     (type == PIPE_ISOCHRONOUS),
1301						     usb_maxpacket(udev, pipe, is_out)) / 1000;
1302			break;
1303		}
1304		hep->hcpriv = ep;
1305	}
1306	ep->num_req = isp1362_hcd->req_serial++;
1307
1308	/* maybe put endpoint into schedule */
1309	switch (type) {
1310	case PIPE_CONTROL:
1311	case PIPE_BULK:
1312		if (list_empty(&ep->schedule)) {
1313			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1314				__func__, ep, ep->num_req);
1315			list_add_tail(&ep->schedule, &isp1362_hcd->async);
1316		}
1317		break;
1318	case PIPE_ISOCHRONOUS:
1319	case PIPE_INTERRUPT:
1320		urb->interval = ep->interval;
1321
1322		/* urb submitted for already existing EP */
1323		if (ep->branch < PERIODIC_SIZE)
1324			break;
1325
1326		retval = balance(isp1362_hcd, ep->interval, ep->load);
1327		if (retval < 0) {
1328			pr_err("%s: balance returned %d\n", __func__, retval);
1329			goto fail;
1330		}
1331		ep->branch = retval;
1332		retval = 0;
1333		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1334		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1335		    __func__, isp1362_hcd->fmindex, ep->branch,
1336		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1337		     ~(PERIODIC_SIZE - 1)) + ep->branch,
1338		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1339
1340		if (list_empty(&ep->schedule)) {
1341			if (type == PIPE_ISOCHRONOUS) {
1342				u16 frame = isp1362_hcd->fmindex;
1343
1344				frame += max_t(u16, 8, ep->interval);
1345				frame &= ~(ep->interval - 1);
1346				frame |= ep->branch;
1347				if (frame_before(frame, isp1362_hcd->fmindex))
1348					frame += ep->interval;
1349				urb->start_frame = frame;
1350
1351				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1352				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1353			} else {
1354				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1355				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1356			}
1357		} else
1358			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1359
1360		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1361		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1362		    isp1362_hcd->load[ep->branch] + ep->load);
1363		isp1362_hcd->load[ep->branch] += ep->load;
1364	}
1365
1366	urb->hcpriv = hep;
1367	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1368
1369	switch (type) {
1370	case PIPE_CONTROL:
1371	case PIPE_BULK:
1372		start_atl_transfers(isp1362_hcd);
1373		break;
1374	case PIPE_INTERRUPT:
1375		start_intl_transfers(isp1362_hcd);
1376		break;
1377	case PIPE_ISOCHRONOUS:
1378		start_iso_transfers(isp1362_hcd);
1379		break;
1380	default:
1381		BUG();
1382	}
1383 fail:
1384	if (retval)
1385		usb_hcd_unlink_urb_from_ep(hcd, urb);
1386
1387
1388 fail_not_linked:
1389	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1390	if (retval)
1391		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1392	return retval;
1393}
1394
1395static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1396{
1397	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1398	struct usb_host_endpoint *hep;
1399	unsigned long flags;
1400	struct isp1362_ep *ep;
1401	int retval = 0;
1402
1403	DBG(3, "%s: urb %p\n", __func__, urb);
1404
1405	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1406	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1407	if (retval)
1408		goto done;
1409
1410	hep = urb->hcpriv;
1411
1412	if (!hep) {
1413		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1414		return -EIDRM;
1415	}
1416
1417	ep = hep->hcpriv;
1418	if (ep) {
1419		/* In front of queue? */
1420		if (ep->hep->urb_list.next == &urb->urb_list) {
1421			if (!list_empty(&ep->active)) {
1422				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1423				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1424				/* disable processing and queue PTD for removal */
1425				remove_ptd(isp1362_hcd, ep);
1426				urb = NULL;
1427			}
1428		}
1429		if (urb) {
1430			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1431			    ep->num_req);
1432			finish_request(isp1362_hcd, ep, urb, status);
1433		} else
1434			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1435	} else {
1436		pr_warning("%s: No EP in URB %p\n", __func__, urb);
1437		retval = -EINVAL;
1438	}
1439done:
1440	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1441
1442	DBG(3, "%s: exit\n", __func__);
1443
1444	return retval;
1445}
1446
1447static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1448{
1449	struct isp1362_ep *ep = hep->hcpriv;
1450	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1451	unsigned long flags;
1452
1453	DBG(1, "%s: ep %p\n", __func__, ep);
1454	if (!ep)
1455		return;
1456	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1457	if (!list_empty(&hep->urb_list)) {
1458		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1459			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1460			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1461			remove_ptd(isp1362_hcd, ep);
1462			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1463		}
1464	}
1465	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1466	/* Wait for interrupt to clear out active list */
1467	while (!list_empty(&ep->active))
1468		msleep(1);
1469
1470	DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1471
1472	usb_put_dev(ep->udev);
1473	kfree(ep);
1474	hep->hcpriv = NULL;
1475}
1476
1477static int isp1362_get_frame(struct usb_hcd *hcd)
1478{
1479	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1480	u32 fmnum;
1481	unsigned long flags;
1482
1483	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1484	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1485	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1486
1487	return (int)fmnum;
1488}
1489
1490/*-------------------------------------------------------------------------*/
1491
1492/* Adapted from ohci-hub.c */
1493static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1494{
1495	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1496	int ports, i, changed = 0;
1497	unsigned long flags;
1498
1499	if (!HC_IS_RUNNING(hcd->state))
1500		return -ESHUTDOWN;
1501
 1502	/* Report no status change now if we are scheduled to be
1503	   called later */
1504	if (timer_pending(&hcd->rh_timer))
1505		return 0;
1506
1507	ports = isp1362_hcd->rhdesca & RH_A_NDP;
1508	BUG_ON(ports > 2);
1509
1510	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1511	/* init status */
1512	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1513		buf[0] = changed = 1;
1514	else
1515		buf[0] = 0;
1516
1517	for (i = 0; i < ports; i++) {
1518		u32 status = isp1362_hcd->rhport[i];
1519
1520		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1521			      RH_PS_OCIC | RH_PS_PRSC)) {
1522			changed = 1;
1523			buf[0] |= 1 << (i + 1);
1524			continue;
1525		}
1526
1527		if (!(status & RH_PS_CCS))
1528			continue;
1529	}
1530	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1531	return changed;
1532}
1533
1534static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1535				   struct usb_hub_descriptor *desc)
1536{
1537	u32 reg = isp1362_hcd->rhdesca;
1538
1539	DBG(3, "%s: enter\n", __func__);
1540
1541	desc->bDescriptorType = USB_DT_HUB;
1542	desc->bDescLength = 9;
1543	desc->bHubContrCurrent = 0;
1544	desc->bNbrPorts = reg & 0x3;
1545	/* Power switching, device type, overcurrent. */
1546	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
1547						(HUB_CHAR_LPSM |
1548						 HUB_CHAR_COMPOUND |
1549						 HUB_CHAR_OCPM));
1550	DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
1551			desc->wHubCharacteristics);
1552	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1553	/* ports removable, and legacy PortPwrCtrlMask */
1554	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1555	desc->u.hs.DeviceRemovable[1] = ~0;
1556
1557	DBG(3, "%s: exit\n", __func__);
1558}
1559
1560/* Adapted from ohci-hub.c */
1561static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1562			       u16 wIndex, char *buf, u16 wLength)
1563{
1564	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1565	int retval = 0;
1566	unsigned long flags;
1567	unsigned long t1;
1568	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1569	u32 tmp = 0;
1570
1571	switch (typeReq) {
1572	case ClearHubFeature:
1573		DBG(0, "ClearHubFeature: ");
1574		switch (wValue) {
1575		case C_HUB_OVER_CURRENT:
1576			DBG(0, "C_HUB_OVER_CURRENT\n");
1577			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1578			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1579			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
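			/* fall through */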
1580		case C_HUB_LOCAL_POWER:
1581			DBG(0, "C_HUB_LOCAL_POWER\n");
1582			break;
1583		default:
1584			goto error;
1585		}
1586		break;
1587	case SetHubFeature:
1588		DBG(0, "SetHubFeature: ");
1589		switch (wValue) {
1590		case C_HUB_OVER_CURRENT:
1591		case C_HUB_LOCAL_POWER:
1592			DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1593			break;
1594		default:
1595			goto error;
1596		}
1597		break;
1598	case GetHubDescriptor:
1599		DBG(0, "GetHubDescriptor\n");
1600		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1601		break;
1602	case GetHubStatus:
1603		DBG(0, "GetHubStatus\n");
1604		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1605		break;
1606	case GetPortStatus:
1607#ifndef VERBOSE
1608		DBG(0, "GetPortStatus\n");
1609#endif
1610		if (!wIndex || wIndex > ports)
1611			goto error;
1612		tmp = isp1362_hcd->rhport[--wIndex];
1613		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1614		break;
1615	case ClearPortFeature:
1616		DBG(0, "ClearPortFeature: ");
1617		if (!wIndex || wIndex > ports)
1618			goto error;
1619		wIndex--;
1620
1621		switch (wValue) {
1622		case USB_PORT_FEAT_ENABLE:
1623			DBG(0, "USB_PORT_FEAT_ENABLE\n");
1624			tmp = RH_PS_CCS;
1625			break;
1626		case USB_PORT_FEAT_C_ENABLE:
1627			DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1628			tmp = RH_PS_PESC;
1629			break;
1630		case USB_PORT_FEAT_SUSPEND:
1631			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1632			tmp = RH_PS_POCI;
1633			break;
1634		case USB_PORT_FEAT_C_SUSPEND:
1635			DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1636			tmp = RH_PS_PSSC;
1637			break;
1638		case USB_PORT_FEAT_POWER:
1639			DBG(0, "USB_PORT_FEAT_POWER\n");
1640			tmp = RH_PS_LSDA;
1641
1642			break;
1643		case USB_PORT_FEAT_C_CONNECTION:
1644			DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1645			tmp = RH_PS_CSC;
1646			break;
1647		case USB_PORT_FEAT_C_OVER_CURRENT:
1648			DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1649			tmp = RH_PS_OCIC;
1650			break;
1651		case USB_PORT_FEAT_C_RESET:
1652			DBG(0, "USB_PORT_FEAT_C_RESET\n");
1653			tmp = RH_PS_PRSC;
1654			break;
1655		default:
1656			goto error;
1657		}
1658
1659		spin_lock_irqsave(&isp1362_hcd->lock, flags);
1660		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1661		isp1362_hcd->rhport[wIndex] =
1662			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1663		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1664		break;
1665	case SetPortFeature:
1666		DBG(0, "SetPortFeature: ");
1667		if (!wIndex || wIndex > ports)
1668			goto error;
1669		wIndex--;
1670		switch (wValue) {
1671		case USB_PORT_FEAT_SUSPEND:
1672			DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1673			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1674			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1675			isp1362_hcd->rhport[wIndex] =
1676				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1677			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1678			break;
1679		case USB_PORT_FEAT_POWER:
1680			DBG(0, "USB_PORT_FEAT_POWER\n");
1681			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1682			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1683			isp1362_hcd->rhport[wIndex] =
1684				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1685			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1686			break;
1687		case USB_PORT_FEAT_RESET:
1688			DBG(0, "USB_PORT_FEAT_RESET\n");
1689			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1690
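			/* Assert the port reset for USB_RESET_WIDTH ms: wait for
			 * a pending reset pulse to finish, stop early if the
			 * device has disconnected, otherwise start another 10 ms
			 * reset pulse (dropping the lock while sleeping).
			 */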
1691			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1692			while (time_before(jiffies, t1)) {
1693				/* spin until any current reset finishes */
1694				for (;;) {
1695					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1696					if (!(tmp & RH_PS_PRS))
1697						break;
1698					udelay(500);
1699				}
1700				if (!(tmp & RH_PS_CCS))
1701					break;
1702				/* Reset lasts 10 ms (so the datasheet claims) */
1703				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1704
1705				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1706				msleep(10);
1707				spin_lock_irqsave(&isp1362_hcd->lock, flags);
1708			}
1709
1710			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1711									 HCRHPORT1 + wIndex);
1712			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1713			break;
1714		default:
1715			goto error;
1716		}
1717		break;
1718
1719	default:
1720 error:
1721		/* "protocol stall" on error */
1722		DBG(0, "PROTOCOL STALL\n");
1723		retval = -EPIPE;
1724	}
1725
1726	return retval;
1727}
1728
1729#ifdef	CONFIG_PM
1730static int isp1362_bus_suspend(struct usb_hcd *hcd)
1731{
1732	int status = 0;
1733	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1734	unsigned long flags;
1735
1736	if (time_before(jiffies, isp1362_hcd->next_statechange))
1737		msleep(5);
1738
1739	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1740
1741	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1742	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1743	case OHCI_USB_RESUME:
1744		DBG(0, "%s: resume/suspend?\n", __func__);
1745		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1746		isp1362_hcd->hc_control |= OHCI_USB_RESET;
1747		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1748		/* FALL THROUGH */
1749	case OHCI_USB_RESET:
1750		status = -EBUSY;
1751		pr_warning("%s: needs reinit!\n", __func__);
1752		goto done;
1753	case OHCI_USB_SUSPEND:
1754		pr_warning("%s: already suspended?\n", __func__);
1755		goto done;
1756	}
1757	DBG(0, "%s: suspend root hub\n", __func__);
1758
1759	/* First stop any processing */
1760	hcd->state = HC_STATE_QUIESCING;
1761	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1762	    !list_empty(&isp1362_hcd->intl_queue.active) ||
1763	    !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1764	    !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1765		int limit;
1766
1767		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1768		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1769		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1770		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1771		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1772
1773		DBG(0, "%s: stopping schedules ...\n", __func__);
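		/* Wait (up to 2 ms) for the next SOF so the controller stops
		 * fetching PTDs, then give pending transfers a few more
		 * milliseconds to complete before collecting the done maps.
		 */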
1774		limit = 2000;
1775		while (limit > 0) {
1776			udelay(250);
1777			limit -= 250;
1778			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1779				break;
1780		}
1781		mdelay(7);
1782		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1783			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1784			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1785		}
1786		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1787			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1788			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1789		}
1790		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1791			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1792		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1793			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1794	}
1795	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1796		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1797	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1798			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1799
1800	/* Suspend hub */
1801	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1802	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1803	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1804	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1805
1806#if 1
1807	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1808	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1809		pr_err("%s: controller won't suspend %08x\n", __func__,
1810		    isp1362_hcd->hc_control);
1811		status = -EBUSY;
1812	} else
1813#endif
1814	{
1815		/* no resumes until devices finish suspending */
1816		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1817	}
1818done:
1819	if (status == 0) {
1820		hcd->state = HC_STATE_SUSPENDED;
1821		DBG(0, "%s: HCD suspended: %08x\n", __func__,
1822		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1823	}
1824	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1825	return status;
1826}
1827
1828static int isp1362_bus_resume(struct usb_hcd *hcd)
1829{
1830	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1831	u32 port;
1832	unsigned long flags;
1833	int status = -EINPROGRESS;
1834
1835	if (time_before(jiffies, isp1362_hcd->next_statechange))
1836		msleep(5);
1837
1838	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1839	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1840	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1841	if (hcd->state == HC_STATE_RESUMING) {
1842		pr_warning("%s: duplicate resume\n", __func__);
1843		status = 0;
1844	} else
1845		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1846		case OHCI_USB_SUSPEND:
1847			DBG(0, "%s: resume root hub\n", __func__);
1848			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1849			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1850			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1851			break;
1852		case OHCI_USB_RESUME:
1853			/* HCFS changes sometime after INTR_RD */
1854			DBG(0, "%s: remote wakeup\n", __func__);
1855			break;
1856		case OHCI_USB_OPER:
1857			DBG(0, "%s: odd resume\n", __func__);
1858			status = 0;
1859			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1860			break;
1861		default:		/* RESET, we lost power */
1862			DBG(0, "%s: root hub hardware reset\n", __func__);
1863			status = -EBUSY;
1864		}
1865	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1866	if (status == -EBUSY) {
1867		DBG(0, "%s: Restarting HC\n", __func__);
1868		isp1362_hc_stop(hcd);
1869		return isp1362_hc_start(hcd);
1870	}
1871	if (status != -EINPROGRESS)
1872		return status;
1873	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1874	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1875	while (port--) {
1876		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1877
1878		/* force global, not selective, resume */
1879		if (!(stat & RH_PS_PSS)) {
1880			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1881			continue;
1882		}
1883		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1884		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1885	}
1886	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1887
1888	/* Some controllers (lucent) need extra-long delays */
1889	hcd->state = HC_STATE_RESUMING;
1890	mdelay(20 /* usb 11.5.1.10 */ + 15);
1891
1892	isp1362_hcd->hc_control = OHCI_USB_OPER;
1893	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1894	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1895	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1896	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1897	/* TRSMRCY */
1898	msleep(10);
1899
1900	/* keep it alive for ~5x suspend + resume costs */
1901	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1902
1903	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1904	hcd->state = HC_STATE_RUNNING;
1905	return 0;
1906}
1907#else
1908#define	isp1362_bus_suspend	NULL
1909#define	isp1362_bus_resume	NULL
1910#endif
1911
1912/*-------------------------------------------------------------------------*/
1913
1914static void dump_irq(struct seq_file *s, char *label, u16 mask)
1915{
1916	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1917		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1918		   mask & HCuPINT_SUSP ? " susp" : "",
1919		   mask & HCuPINT_OPR ? " opr" : "",
1920		   mask & HCuPINT_EOT ? " eot" : "",
1921		   mask & HCuPINT_ATL ? " atl" : "",
1922		   mask & HCuPINT_SOF ? " sof" : "");
1923}
1924
1925static void dump_int(struct seq_file *s, char *label, u32 mask)
1926{
1927	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1928		   mask & OHCI_INTR_MIE ? " MIE" : "",
1929		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
1930		   mask & OHCI_INTR_FNO ? " fno" : "",
1931		   mask & OHCI_INTR_UE ? " ue" : "",
1932		   mask & OHCI_INTR_RD ? " rd" : "",
1933		   mask & OHCI_INTR_SF ? " sof" : "",
1934		   mask & OHCI_INTR_SO ? " so" : "");
1935}
1936
1937static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1938{
1939	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1940		   mask & OHCI_CTRL_RWC ? " rwc" : "",
1941		   mask & OHCI_CTRL_RWE ? " rwe" : "",
1942		   ({
1943			   char *hcfs;
1944			   switch (mask & OHCI_CTRL_HCFS) {
1945			   case OHCI_USB_OPER:
1946				   hcfs = " oper";
1947				   break;
1948			   case OHCI_USB_RESET:
1949				   hcfs = " reset";
1950				   break;
1951			   case OHCI_USB_RESUME:
1952				   hcfs = " resume";
1953				   break;
1954			   case OHCI_USB_SUSPEND:
1955				   hcfs = " suspend";
1956				   break;
1957			   default:
1958				   hcfs = " ?";
1959			   }
1960			   hcfs;
1961		   }));
1962}
1963
1964static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1965{
1966	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1967		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
1968	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1969		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1970	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1971		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1972	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1973		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1974	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1975		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
1976	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1977		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1978	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1979		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
1980	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
1981		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
1982	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
1983		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
1984	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
1985		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
1986	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
1987		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
1988	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
1989		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
1990	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
1991		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
1992	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
1993		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
1994	seq_printf(s, "\n");
1995	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
1996		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
1997	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
1998		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
1999	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2000		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2001	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2002		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
2003	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2004		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2005	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2006		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2007	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2008		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2009	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2010		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2011	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2012		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2013#if 0
2014	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
2015		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2016#endif
2017	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2018		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2019	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2020		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2021	seq_printf(s, "\n");
2022	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2023		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2024	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2025		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2026	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2027		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2028	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2029		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2030	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2031		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2032	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2033		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2034	seq_printf(s, "\n");
2035	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2036		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2037	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2038		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2039#if 0
2040	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2041		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2042#endif
2043	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2044		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2045	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2046		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2047	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2048		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2049	seq_printf(s, "\n");
2050	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2051		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2052	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2053		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2054}
2055
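/* debugfs show routine: dump transfer alignment statistics, interrupt
 * counters, the chip registers and the currently scheduled endpoints.
 */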
2056static int isp1362_show(struct seq_file *s, void *unused)
2057{
2058	struct isp1362_hcd *isp1362_hcd = s->private;
2059	struct isp1362_ep *ep;
2060	int i;
2061
2062	seq_printf(s, "%s\n%s version %s\n",
2063		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2064
2065	/* collect statistics to help estimate potential win for
2066	 * DMA engines that care about alignment (PXA)
2067	 */
2068	seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2069		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2070		   isp1362_hcd->stat2, isp1362_hcd->stat1);
2071	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2072	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2073	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2074		   max(isp1362_hcd->istl_queue[0].stat_maxptds,
2075		       isp1362_hcd->istl_queue[1].stat_maxptds));
2076
2077	/* FIXME: don't show the following in suspended state */
2078	spin_lock_irq(&isp1362_hcd->lock);
2079
2080	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2081	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2082	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2083	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2084	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2085
2086	for (i = 0; i < NUM_ISP1362_IRQS; i++)
2087		if (isp1362_hcd->irq_stat[i])
2088			seq_printf(s, "%-15s: %d\n",
2089				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2090
2091	dump_regs(s, isp1362_hcd);
2092	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2093		struct urb *urb;
2094
2095		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2096			   ({
2097				   char *s;
2098				   switch (ep->nextpid) {
2099				   case USB_PID_IN:
2100					   s = "in";
2101					   break;
2102				   case USB_PID_OUT:
2103					   s = "out";
2104					   break;
2105				   case USB_PID_SETUP:
2106					   s = "setup";
2107					   break;
2108				   case USB_PID_ACK:
2109					   s = "status";
2110					   break;
2111				   default:
2112					   s = "?";
2113					   break;
2114				   }
2115				   s; }), ep->maxpacket);
2116		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2117			seq_printf(s, "  urb%p, %d/%d\n", urb,
2118				   urb->actual_length,
2119				   urb->transfer_buffer_length);
2120		}
2121	}
2122	if (!list_empty(&isp1362_hcd->async))
2123		seq_printf(s, "\n");
2124	dump_ptd_queue(&isp1362_hcd->atl_queue);
2125
2126	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2127
2128	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2129		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2130			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2131
2132		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2133			   ep->interval, ep,
2134			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2135			   ep->udev->devnum, ep->epnum,
2136			   (ep->epnum == 0) ? "" :
2137			   ((ep->nextpid == USB_PID_IN) ?
2138			    "in" : "out"), ep->maxpacket);
2139	}
2140	dump_ptd_queue(&isp1362_hcd->intl_queue);
2141
2142	seq_printf(s, "ISO:\n");
2143
2144	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2145		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2146			   ep->interval, ep,
2147			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2148			   ep->udev->devnum, ep->epnum,
2149			   (ep->epnum == 0) ? "" :
2150			   ((ep->nextpid == USB_PID_IN) ?
2151			    "in" : "out"), ep->maxpacket);
2152	}
2153
2154	spin_unlock_irq(&isp1362_hcd->lock);
2155	seq_printf(s, "\n");
2156
2157	return 0;
2158}
2159
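/* debugfs_create_file() stores the isp1362_hcd pointer in the inode's
 * i_private field; hand it to single_open() so that isp1362_show() finds
 * it in seq_file->private.
 */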
2160static int isp1362_open(struct inode *inode, struct file *file)
2161{
2162	return single_open(file, isp1362_show, inode->i_private);
2163}
2164
2165static const struct file_operations debug_ops = {
2166	.open = isp1362_open,
2167	.read = seq_read,
2168	.llseek = seq_lseek,
2169	.release = single_release,
2170};
2171
2172/* expect just one isp1362_hcd per system */
2173static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2174{
2175	isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
2176						      usb_debug_root,
2177						      isp1362_hcd, &debug_ops);
2178}
2179
2180static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2181{
2182	debugfs_remove(isp1362_hcd->debug_file);
2183}
2184
2185/*-------------------------------------------------------------------------*/
2186
2187static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2188{
2189	int tmp = 20;
2190
2191	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2192	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2193	while (--tmp) {
2194		mdelay(1);
2195		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2196			break;
2197	}
2198	if (!tmp)
2199		pr_err("Software reset timeout\n");
2200}
2201
2202static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2203{
2204	unsigned long flags;
2205
2206	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2207	__isp1362_sw_reset(isp1362_hcd);
2208	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2209}
2210
2211static int isp1362_mem_config(struct usb_hcd *hcd)
2212{
2213	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2214	unsigned long flags;
2215	u32 total;
2216	u16 istl_size = ISP1362_ISTL_BUFSIZE;
2217	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2218	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2219	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2220	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2221	u16 atl_size;
2222	int i;
2223
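	/* Partition the chip's internal buffer memory: two ping-pong ISTL
	 * buffers for isochronous transfers, ISP1362_INTL_BUFFERS blocks for
	 * interrupt transfers, and as many ATL blocks as still fit (at most
	 * 32, the width of the skip/done bitmaps).
	 */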
2224	WARN_ON(istl_size & 3);
2225	WARN_ON(atl_blksize & 3);
2226	WARN_ON(intl_blksize & 3);
2227	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2228	WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2229
2230	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2231	if (atl_buffers > 32)
2232		atl_buffers = 32;
2233	atl_size = atl_buffers * atl_blksize;
2234	total = atl_size + intl_size + istl_size;
2235	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2236	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2237		 istl_size / 2, istl_size, 0, istl_size / 2);
2238	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2239		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2240		 intl_size, istl_size);
2241	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2242		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2243		 atl_size, istl_size + intl_size);
2244	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2245		 ISP1362_BUF_SIZE - total);
2246
2247	if (total > ISP1362_BUF_SIZE) {
2248		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2249			__func__, total, ISP1362_BUF_SIZE);
2250		return -ENOMEM;
2251	}
2252
2253	total = istl_size + intl_size + atl_size;
2254	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2255
2256	for (i = 0; i < 2; i++) {
2257		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2258		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2259		isp1362_hcd->istl_queue[i].blk_size = 4;
2260		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2261		snprintf(isp1362_hcd->istl_queue[i].name,
2262			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2263		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2264		     isp1362_hcd->istl_queue[i].name,
2265		     isp1362_hcd->istl_queue[i].buf_start,
2266		     isp1362_hcd->istl_queue[i].buf_size);
2267	}
2268	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2269
2270	isp1362_hcd->intl_queue.buf_start = istl_size;
2271	isp1362_hcd->intl_queue.buf_size = intl_size;
2272	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2273	isp1362_hcd->intl_queue.blk_size = intl_blksize;
2274	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2275	isp1362_hcd->intl_queue.skip_map = ~0;
2276	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2277
2278	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2279			    isp1362_hcd->intl_queue.buf_size);
2280	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2281			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2282	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2283	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2284			    1 << (ISP1362_INTL_BUFFERS - 1));
2285
2286	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2287	isp1362_hcd->atl_queue.buf_size = atl_size;
2288	isp1362_hcd->atl_queue.buf_count = atl_buffers;
2289	isp1362_hcd->atl_queue.blk_size = atl_blksize;
2290	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2291	isp1362_hcd->atl_queue.skip_map = ~0;
2292	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2293
2294	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2295			    isp1362_hcd->atl_queue.buf_size);
2296	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2297			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2298	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2299	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2300			    1 << (atl_buffers - 1));
2301
2302	snprintf(isp1362_hcd->atl_queue.name,
2303		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2304	snprintf(isp1362_hcd->intl_queue.name,
2305		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2306	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2307	     isp1362_hcd->intl_queue.name,
2308	     isp1362_hcd->intl_queue.buf_start,
2309	     ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2310	     isp1362_hcd->intl_queue.buf_size);
2311	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2312	     isp1362_hcd->atl_queue.name,
2313	     isp1362_hcd->atl_queue.buf_start,
2314	     atl_buffers, isp1362_hcd->atl_queue.blk_size,
2315	     isp1362_hcd->atl_queue.buf_size);
2316
2317	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2318
2319	return 0;
2320}
2321
2322static int isp1362_hc_reset(struct usb_hcd *hcd)
2323{
2324	int ret = 0;
2325	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2326	unsigned long t;
2327	unsigned long timeout = 100;
2328	unsigned long flags;
2329	int clkrdy = 0;
2330
2331	pr_debug("%s:\n", __func__);
2332
2333	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2334		isp1362_hcd->board->reset(hcd->self.controller, 1);
2335		msleep(20);
2336		if (isp1362_hcd->board->clock)
2337			isp1362_hcd->board->clock(hcd->self.controller, 1);
2338		isp1362_hcd->board->reset(hcd->self.controller, 0);
2339	} else
2340		isp1362_sw_reset(isp1362_hcd);
2341
2342	/* chip has been reset. First we need to see a clock */
2343	t = jiffies + msecs_to_jiffies(timeout);
2344	while (!clkrdy && time_before_eq(jiffies, t)) {
2345		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2346		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2347		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2348		if (!clkrdy)
2349			msleep(4);
2350	}
2351
2352	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2353	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2354	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2355	if (!clkrdy) {
2356		pr_err("Clock not ready after %lums\n", timeout);
2357		ret = -ENODEV;
2358	}
2359	return ret;
2360}
2361
2362static void isp1362_hc_stop(struct usb_hcd *hcd)
2363{
2364	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2365	unsigned long flags;
2366	u32 tmp;
2367
2368	pr_debug("%s:\n", __func__);
2369
2370	del_timer_sync(&hcd->rh_timer);
2371
2372	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2373
2374	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2375
2376	/* Switch off power for all ports */
2377	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2378	tmp &= ~(RH_A_NPS | RH_A_PSM);
2379	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2380	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2381
2382	/* Reset the chip */
2383	if (isp1362_hcd->board && isp1362_hcd->board->reset)
2384		isp1362_hcd->board->reset(hcd->self.controller, 1);
2385	else
2386		__isp1362_sw_reset(isp1362_hcd);
2387
2388	if (isp1362_hcd->board && isp1362_hcd->board->clock)
2389		isp1362_hcd->board->clock(hcd->self.controller, 0);
2390
2391	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2392}
2393
2394#ifdef CHIP_BUFFER_TEST
2395static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2396{
2397	int ret = 0;
2398	u16 *ref;
2399	unsigned long flags;
2400
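	/* Write known patterns to the chip buffer at various offsets and
	 * transfer sizes and read them back; a mismatch points to broken
	 * bus timing (see the comment at the top of this file).
	 */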
2401	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2402	if (ref) {
2403		int offset;
2404		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2405
2406		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2407			ref[offset] = ~offset;
2408			tst[offset] = offset;
2409		}
2410
2411		for (offset = 0; offset < 4; offset++) {
2412			int j;
2413
2414			for (j = 0; j < 8; j++) {
2415				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2416				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2417				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2418				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2419
2420				if (memcmp(ref, tst, j)) {
2421					ret = -ENODEV;
2422					pr_err("%s: memory check with %d byte offset %d failed\n",
2423					    __func__, j, offset);
2424					dump_data((u8 *)ref + offset, j);
2425					dump_data((u8 *)tst + offset, j);
2426				}
2427			}
2428		}
2429
2430		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2431		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2432		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2433		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2434
2435		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2436			ret = -ENODEV;
2437			pr_err("%s: memory check failed\n", __func__);
2438			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2439		}
2440
2441		for (offset = 0; offset < 256; offset++) {
2442			int test_size = 0;
2443
2444			yield();
2445
2446			memset(tst, 0, ISP1362_BUF_SIZE);
2447			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2448			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2449			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2450			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2451			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2452				   ISP1362_BUF_SIZE / 2)) {
2453				pr_err("%s: Failed to clear buffer\n", __func__);
2454				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2455				break;
2456			}
2457			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2458			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2459			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2460					     offset * 2 + PTD_HEADER_SIZE, test_size);
2461			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2462					    PTD_HEADER_SIZE + test_size);
2463			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2464			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2465				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2466				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2467				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2468				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2469						    PTD_HEADER_SIZE + test_size);
2470				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2471				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2472					ret = -ENODEV;
2473					pr_err("%s: memory check with offset %02x failed\n",
2474					    __func__, offset);
2475					break;
2476				}
2477				pr_warning("%s: memory check with offset %02x ok after second read\n",
2478				     __func__, offset);
2479			}
2480		}
2481		kfree(ref);
2482	}
2483	return ret;
2484}
2485#endif
2486
2487static int isp1362_hc_start(struct usb_hcd *hcd)
2488{
2489	int ret;
2490	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2491	struct isp1362_platform_data *board = isp1362_hcd->board;
2492	u16 hwcfg;
2493	u16 chipid;
2494	unsigned long flags;
2495
2496	pr_debug("%s:\n", __func__);
2497
2498	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2499	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2500	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2501
2502	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2503		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2504		return -ENODEV;
2505	}
2506
2507#ifdef CHIP_BUFFER_TEST
2508	ret = isp1362_chip_test(isp1362_hcd);
2509	if (ret)
2510		return -ENODEV;
2511#endif
2512	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2513	/* clear interrupt status and disable all interrupt sources */
2514	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2515	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2516
2517	/* HW conf */
2518	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2519	if (board->sel15Kres)
2520		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2521			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2522	if (board->clknotstop)
2523		hwcfg |= HCHWCFG_CLKNOTSTOP;
2524	if (board->oc_enable)
2525		hwcfg |= HCHWCFG_ANALOG_OC;
2526	if (board->int_act_high)
2527		hwcfg |= HCHWCFG_INT_POL;
2528	if (board->int_edge_triggered)
2529		hwcfg |= HCHWCFG_INT_TRIGGER;
2530	if (board->dreq_act_high)
2531		hwcfg |= HCHWCFG_DREQ_POL;
2532	if (board->dack_act_high)
2533		hwcfg |= HCHWCFG_DACK_POL;
2534	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2535	isp1362_show_reg(isp1362_hcd, HCHWCFG);
2536	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2537	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2538
2539	ret = isp1362_mem_config(hcd);
2540	if (ret)
2541		return ret;
2542
2543	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2544
2545	/* Root hub conf */
2546	isp1362_hcd->rhdesca = 0;
2547	if (board->no_power_switching)
2548		isp1362_hcd->rhdesca |= RH_A_NPS;
2549	if (board->power_switching_mode)
2550		isp1362_hcd->rhdesca |= RH_A_PSM;
2551	if (board->potpg)
2552		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2553	else
2554		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2555
2556	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2557	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2558	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2559
2560	isp1362_hcd->rhdescb = RH_B_PPCM;
2561	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2562	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2563
2564	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2565	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2566	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2567
2568	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2569
2570	isp1362_hcd->hc_control = OHCI_USB_OPER;
2571	hcd->state = HC_STATE_RUNNING;
2572
2573	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2574	/* Set up interrupts */
2575	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2576	isp1362_hcd->intenb |= OHCI_INTR_RD;
2577	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2578	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2579	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2580
2581	/* Go operational */
2582	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2583	/* enable global power */
2584	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2585
2586	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2587
2588	return 0;
2589}
2590
2591/*-------------------------------------------------------------------------*/
2592
2593static struct hc_driver isp1362_hc_driver = {
2594	.description =		hcd_name,
2595	.product_desc =		"ISP1362 Host Controller",
2596	.hcd_priv_size =	sizeof(struct isp1362_hcd),
2597
2598	.irq =			isp1362_irq,
2599	.flags =		HCD_USB11 | HCD_MEMORY,
2600
2601	.reset =		isp1362_hc_reset,
2602	.start =		isp1362_hc_start,
2603	.stop =			isp1362_hc_stop,
2604
2605	.urb_enqueue =		isp1362_urb_enqueue,
2606	.urb_dequeue =		isp1362_urb_dequeue,
2607	.endpoint_disable =	isp1362_endpoint_disable,
2608
2609	.get_frame_number =	isp1362_get_frame,
2610
2611	.hub_status_data =	isp1362_hub_status_data,
2612	.hub_control =		isp1362_hub_control,
2613	.bus_suspend =		isp1362_bus_suspend,
2614	.bus_resume =		isp1362_bus_resume,
2615};
2616
2617/*-------------------------------------------------------------------------*/
2618
2619static int isp1362_remove(struct platform_device *pdev)
2620{
2621	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2622	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
 
2623
2624	remove_debug_file(isp1362_hcd);
2625	DBG(0, "%s: Removing HCD\n", __func__);
2626	usb_remove_hcd(hcd);
2627	DBG(0, "%s: put_hcd\n", __func__);
2628	usb_put_hcd(hcd);
2629	DBG(0, "%s: Done\n", __func__);
2630
2631	return 0;
2632}
2633
2634static int isp1362_probe(struct platform_device *pdev)
2635{
2636	struct usb_hcd *hcd;
2637	struct isp1362_hcd *isp1362_hcd;
2638	struct resource *addr, *data, *irq_res;
2639	void __iomem *addr_reg;
2640	void __iomem *data_reg;
2641	int irq;
2642	int retval = 0;
 
2643	unsigned int irq_flags = 0;
2644
2645	if (usb_disabled())
2646		return -ENODEV;
2647
2648	/* basic sanity checks first.  board-specific init logic should
2649	 * have initialized these three resources and probably board
2650	 * specific platform_data.  we don't probe for IRQs, and do only
2651	 * minimal sanity checking.
2652	 */
2653	if (pdev->num_resources < 3)
2654		return -ENODEV;
2655
2656	if (pdev->dev.dma_mask) {
2657		DBG(1, "won't do DMA");
2658		return -ENODEV;
2659	}
2660
 
 
2661	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2662	if (!irq_res)
2663		return -ENODEV;
2664
 
2665	irq = irq_res->start;
2666
2667	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2668	addr_reg = devm_ioremap_resource(&pdev->dev, addr);
2669	if (IS_ERR(addr_reg))
2670		return PTR_ERR(addr_reg);
 
2671
2672	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2673	data_reg = devm_ioremap_resource(&pdev->dev, data);
2674	if (IS_ERR(data_reg))
2675		return PTR_ERR(data_reg);
2676
2677	/* allocate and initialize hcd */
2678	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2679	if (!hcd)
2680		return -ENOMEM;
2681
 
2682	hcd->rsrc_start = data->start;
2683	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2684	isp1362_hcd->data_reg = data_reg;
2685	isp1362_hcd->addr_reg = addr_reg;
2686
2687	isp1362_hcd->next_statechange = jiffies;
2688	spin_lock_init(&isp1362_hcd->lock);
2689	INIT_LIST_HEAD(&isp1362_hcd->async);
2690	INIT_LIST_HEAD(&isp1362_hcd->periodic);
2691	INIT_LIST_HEAD(&isp1362_hcd->isoc);
2692	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2693	isp1362_hcd->board = dev_get_platdata(&pdev->dev);
2694#if USE_PLATFORM_DELAY
2695	if (!isp1362_hcd->board->delay) {
2696		dev_err(hcd->self.controller, "No platform delay function given\n");
2697		retval = -ENODEV;
2698		goto err;
2699	}
2700#endif
2701
2702	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2703		irq_flags |= IRQF_TRIGGER_RISING;
2704	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2705		irq_flags |= IRQF_TRIGGER_FALLING;
2706	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2707		irq_flags |= IRQF_TRIGGER_HIGH;
2708	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2709		irq_flags |= IRQF_TRIGGER_LOW;
2710
2711	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
2712	if (retval != 0)
2713		goto err;
2714	device_wakeup_enable(hcd->self.controller);
2715
2716	dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);
2717
2718	create_debug_file(isp1362_hcd);
2719
2720	return 0;
2721
2722 err:
2723	usb_put_hcd(hcd);
2724
2725	return retval;
2726}
2727
2728#ifdef	CONFIG_PM
2729static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2730{
2731	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2732	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2733	unsigned long flags;
2734	int retval = 0;
2735
2736	DBG(0, "%s: Suspending device\n", __func__);
2737
2738	if (state.event == PM_EVENT_FREEZE) {
2739		DBG(0, "%s: Suspending root hub\n", __func__);
2740		retval = isp1362_bus_suspend(hcd);
2741	} else {
2742		DBG(0, "%s: Suspending RH ports\n", __func__);
2743		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2744		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2745		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2746	}
2747	if (retval == 0)
2748		pdev->dev.power.power_state = state;
2749	return retval;
2750}
2751
2752static int isp1362_resume(struct platform_device *pdev)
2753{
2754	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2755	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2756	unsigned long flags;
2757
2758	DBG(0, "%s: Resuming\n", __func__);
2759
2760	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2761		DBG(0, "%s: Resume RH ports\n", __func__);
2762		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2763		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2764		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2765		return 0;
2766	}
2767
2768	pdev->dev.power.power_state = PMSG_ON;
2769
2770	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2771}
2772#else
2773#define	isp1362_suspend	NULL
2774#define	isp1362_resume	NULL
2775#endif
2776
2777static struct platform_driver isp1362_driver = {
2778	.probe = isp1362_probe,
2779	.remove = isp1362_remove,
2780
2781	.suspend = isp1362_suspend,
2782	.resume = isp1362_resume,
2783	.driver = {
2784		.name = hcd_name,
 
2785	},
2786};
2787
2788module_platform_driver(isp1362_driver);
 104
 105MODULE_DESCRIPTION(DRIVER_DESC);
 106MODULE_LICENSE("GPL");
 107
 108static const char hcd_name[] = "isp1362-hcd";
 109
 110static void isp1362_hc_stop(struct usb_hcd *hcd);
 111static int isp1362_hc_start(struct usb_hcd *hcd);
 112
 113/*-------------------------------------------------------------------------*/
 114
 115/*
 116 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
 117 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
 118 * completion.
 119 * We don't need a 'disable' counterpart, since interrupts will be disabled
 120 * only by the interrupt handler.
 121 */
 122static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
 123{
 124	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
 125		return;
 126	if (mask & ~isp1362_hcd->irqenb)
 127		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
 128	isp1362_hcd->irqenb |= mask;
 129	if (isp1362_hcd->irq_active)
 130		return;
 131	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
 132}
 133
 134/*-------------------------------------------------------------------------*/
 135
 136static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
 137						     u16 offset)
 138{
 139	struct isp1362_ep_queue *epq = NULL;
 140
 141	if (offset < isp1362_hcd->istl_queue[1].buf_start)
 142		epq = &isp1362_hcd->istl_queue[0];
 143	else if (offset < isp1362_hcd->intl_queue.buf_start)
 144		epq = &isp1362_hcd->istl_queue[1];
 145	else if (offset < isp1362_hcd->atl_queue.buf_start)
 146		epq = &isp1362_hcd->intl_queue;
 147	else if (offset < isp1362_hcd->atl_queue.buf_start +
 148		   isp1362_hcd->atl_queue.buf_size)
 149		epq = &isp1362_hcd->atl_queue;
 150
 151	if (epq)
 152		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
 153	else
 154		pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
 155
 156	return epq;
 157}
 158
 159static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 160{
 161	int offset;
 162
 163	if (index * epq->blk_size > epq->buf_size) {
 164		pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
 165		     epq->buf_size / epq->blk_size);
 166		return -EINVAL;
 167	}
 168	offset = epq->buf_start + index * epq->blk_size;
 169	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
 170
 171	return offset;
 172}
 173
 174/*-------------------------------------------------------------------------*/
 175
 176static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
 177				    int mps)
 178{
 179	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
 180
 181	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
 182	if (xfer_size < size && xfer_size % mps)
 183		xfer_size -= xfer_size % mps;
 184
 185	return xfer_size;
 186}
 187
 188static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
 189			     struct isp1362_ep *ep, u16 len)
 190{
 191	int ptd_offset = -EINVAL;
 192	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
 193	int found;
 194
 195	BUG_ON(len > epq->buf_size);
 196
 197	if (!epq->buf_avail)
 198		return -ENOMEM;
 199
 200	if (ep->num_ptds)
 201		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
 202		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
 203	BUG_ON(ep->num_ptds != 0);
 204
 205	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
 206						num_ptds, 0);
 207	if (found >= epq->buf_count)
 208		return -EOVERFLOW;
 209
 210	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
 211	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
 212	ptd_offset = get_ptd_offset(epq, found);
 213	WARN_ON(ptd_offset < 0);
 214	ep->ptd_offset = ptd_offset;
 215	ep->num_ptds += num_ptds;
 216	epq->buf_avail -= num_ptds;
 217	BUG_ON(epq->buf_avail > epq->buf_count);
 218	ep->ptd_index = found;
 219	bitmap_set(&epq->buf_map, found, num_ptds);
 220	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
 221	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
 222	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
 223
 224	return found;
 225}
 226
 227static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 228{
 229	int last = ep->ptd_index + ep->num_ptds;
 230
 231	if (last > epq->buf_count)
 232		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
 233		    __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
 234		    ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
 235		    epq->buf_map, epq->skip_map);
 236	BUG_ON(last > epq->buf_count);
 237
 238	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
 239	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
 240	epq->buf_avail += ep->num_ptds;
 241	epq->ptd_count--;
 242
 243	BUG_ON(epq->buf_avail > epq->buf_count);
 244	BUG_ON(epq->ptd_count > epq->buf_count);
 245
 246	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
 247	    __func__, epq->name,
 248	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
 249	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
 250	    epq->buf_map, epq->skip_map);
 251
 252	ep->num_ptds = 0;
 253	ep->ptd_offset = -EINVAL;
 254	ep->ptd_index = -EINVAL;
 255}
 256
 257/*-------------------------------------------------------------------------*/
 258
 259/*
 260  Set up PTD's.
 261*/
 262static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 263			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
 264			u16 fno)
 265{
 266	struct ptd *ptd;
 267	int toggle;
 268	int dir;
 269	u16 len;
 270	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
 271
 272	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
 273
 274	ptd = &ep->ptd;
 275
 276	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
 277
 278	switch (ep->nextpid) {
 279	case USB_PID_IN:
 280		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
 281		dir = PTD_DIR_IN;
 282		if (usb_pipecontrol(urb->pipe)) {
 283			len = min_t(size_t, ep->maxpacket, buf_len);
 284		} else if (usb_pipeisoc(urb->pipe)) {
 285			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
 286			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
 287		} else
 288			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 289		DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 290		    (int)buf_len);
 291		break;
 292	case USB_PID_OUT:
 293		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
 294		dir = PTD_DIR_OUT;
 295		if (usb_pipecontrol(urb->pipe))
 296			len = min_t(size_t, ep->maxpacket, buf_len);
 297		else if (usb_pipeisoc(urb->pipe))
 298			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
 299		else
 300			len = max_transfer_size(epq, buf_len, ep->maxpacket);
 301		if (len == 0)
 302			pr_info("%s: Sending ZERO packet: %d\n", __func__,
 303			     urb->transfer_flags & URB_ZERO_PACKET);
 304		DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
 305		    (int)buf_len);
 306		break;
 307	case USB_PID_SETUP:
 308		toggle = 0;
 309		dir = PTD_DIR_SETUP;
 310		len = sizeof(struct usb_ctrlrequest);
 311		DBG(1, "%s: SETUP len %d\n", __func__, len);
 312		ep->data = urb->setup_packet;
 313		break;
 314	case USB_PID_ACK:
 315		toggle = 1;
 316		len = 0;
 317		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
 318			PTD_DIR_OUT : PTD_DIR_IN;
 319		DBG(1, "%s: ACK   len %d\n", __func__, len);
 320		break;
 321	default:
 322		toggle = dir = len = 0;
 323		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
 324		BUG_ON(1);
 325	}
 326
 327	ep->length = len;
 328	if (!len)
 329		ep->data = NULL;
 330
 331	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
 332	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
 333		PTD_EP(ep->epnum);
 334	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
 335	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
 336
 337	if (usb_pipeint(urb->pipe)) {
 338		ptd->faddr |= PTD_SF_INT(ep->branch);
 339		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
 340	}
 341	if (usb_pipeisoc(urb->pipe))
 342		ptd->faddr |= PTD_SF_ISO(fno);
 343
 344	DBG(1, "%s: Finished\n", __func__);
 345}
 346
 347static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 348			      struct isp1362_ep_queue *epq)
 349{
 350	struct ptd *ptd = &ep->ptd;
 351	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
 352
 353	_BUG_ON(ep->ptd_offset < 0);
 354
 355	prefetch(ptd);
 356	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 357	if (len)
 358		isp1362_write_buffer(isp1362_hcd, ep->data,
 359				     ep->ptd_offset + PTD_HEADER_SIZE, len);
 360
 361	dump_ptd(ptd);
 362	dump_ptd_out_data(ptd, ep->data);
 363}
 364
 365static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 366			     struct isp1362_ep_queue *epq)
 367{
 368	struct ptd *ptd = &ep->ptd;
 369	int act_len;
 370
 371	WARN_ON(list_empty(&ep->active));
 372	BUG_ON(ep->ptd_offset < 0);
 373
 374	list_del_init(&ep->active);
 375	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
 376
 377	prefetchw(ptd);
 378	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
 379	dump_ptd(ptd);
 380	act_len = PTD_GET_COUNT(ptd);
 381	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
 382		return;
 383	if (act_len > ep->length)
 384		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
 385			 ep->ptd_offset, act_len, ep->length);
 386	BUG_ON(act_len > ep->length);
 387	/* Only transfer the amount of data that has actually been overwritten
 388	 * in the chip buffer. We don't want any data that doesn't belong to the
 389	 * transfer to leak out of the chip to the callers transfer buffer!
 390	 */
 391	prefetchw(ep->data);
 392	isp1362_read_buffer(isp1362_hcd, ep->data,
 393			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
 394	dump_ptd_in_data(ptd, ep->data);
 395}
 396
 397/*
 398 * INT PTDs will stay in the chip until data is available.
 399 * This function will remove a PTD from the chip when the URB is dequeued.
 400 * Must be called with the spinlock held and IRQs disabled
 401 */
 402static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 403
 404{
 405	int index;
 406	struct isp1362_ep_queue *epq;
 407
 408	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
 409	BUG_ON(ep->ptd_offset < 0);
 410
 411	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 412	BUG_ON(!epq);
 413
 414	/* put ep in remove_list for cleanup */
 415	WARN_ON(!list_empty(&ep->remove_list));
 416	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
 417	/* let SOF interrupt handle the cleanup */
 418	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 419
 420	index = ep->ptd_index;
 421	if (index < 0)
 422		/* ISO queues don't have SKIP registers */
 423		return;
 424
 425	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
 426	    index, ep->ptd_offset, epq->skip_map, 1 << index);
 427
 428	/* prevent further processing of PTD (will be effective after next SOF) */
 429	epq->skip_map |= 1 << index;
 430	if (epq == &isp1362_hcd->atl_queue) {
 431		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
 432		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
 433		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
 434		if (~epq->skip_map == 0)
 435			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 436	} else if (epq == &isp1362_hcd->intl_queue) {
 437		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
 438		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
 439		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
 440		if (~epq->skip_map == 0)
 441			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 442	}
 443}
 444
  445	/*
  446	 * Take done or failed requests out of the schedule. Give back
  447	 * processed URBs.
  448	 */
 449static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
 450			   struct urb *urb, int status)
 451     __releases(isp1362_hcd->lock)
 452     __acquires(isp1362_hcd->lock)
 453{
 454	urb->hcpriv = NULL;
 455	ep->error_count = 0;
 456
 457	if (usb_pipecontrol(urb->pipe))
 458		ep->nextpid = USB_PID_SETUP;
 459
 460	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
 461		ep->num_req, usb_pipedevice(urb->pipe),
 462		usb_pipeendpoint(urb->pipe),
 463		!usb_pipein(urb->pipe) ? "out" : "in",
 464		usb_pipecontrol(urb->pipe) ? "ctrl" :
 465			usb_pipeint(urb->pipe) ? "int" :
 466			usb_pipebulk(urb->pipe) ? "bulk" :
 467			"iso",
 468		urb->actual_length, urb->transfer_buffer_length,
 469		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
 470		"short_ok" : "", urb->status);
 471
 472
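	/* Drop the HCD lock while giving back the URB: the completion
	 * handler may resubmit and re-enter this driver.
	 */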
 473	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
 474	spin_unlock(&isp1362_hcd->lock);
 475	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
 476	spin_lock(&isp1362_hcd->lock);
 477
 478	/* take idle endpoints out of the schedule right away */
 479	if (!list_empty(&ep->hep->urb_list))
 480		return;
 481
 482	/* async deschedule */
 483	if (!list_empty(&ep->schedule)) {
 484		list_del_init(&ep->schedule);
 485		return;
 486	}
 487
 488
 489	if (ep->interval) {
 490		/* periodic deschedule */
 491		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
 492		    ep, ep->branch, ep->load,
 493		    isp1362_hcd->load[ep->branch],
 494		    isp1362_hcd->load[ep->branch] - ep->load);
 495		isp1362_hcd->load[ep->branch] -= ep->load;
 496		ep->branch = PERIODIC_SIZE;
 497	}
 498}
 499
  500	/*
  501	 * Analyze transfer results, handle partial transfers and errors.
  502	 */
 503static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
 504{
 505	struct urb *urb = get_urb(ep);
 506	struct usb_device *udev;
 507	struct ptd *ptd;
 508	int short_ok;
 509	u16 len;
 510	int urbstat = -EINPROGRESS;
 511	u8 cc;
 512
 513	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
 514
 515	udev = urb->dev;
 516	ptd = &ep->ptd;
 517	cc = PTD_GET_CC(ptd);
 518	if (cc == PTD_NOTACCESSED) {
 519		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
 520		    ep->num_req, ptd);
 521		cc = PTD_DEVNOTRESP;
 522	}
 523
 524	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
 525	len = urb->transfer_buffer_length - urb->actual_length;
 526
  527	/* Data underrun is special. For an allowed underrun
  528	 * we clear the error and continue as normal. For a
  529	 * forbidden underrun we finish the DATA stage
  530	 * immediately, while for a control transfer we
  531	 * proceed with the STATUS stage instead.
  532	 */
 533	if (cc == PTD_DATAUNDERRUN) {
 534		if (short_ok) {
 535			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
 536			    __func__, ep->num_req, short_ok ? "" : "not_",
 537			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 538			cc = PTD_CC_NOERROR;
 539			urbstat = 0;
 540		} else {
 541			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
 542			    __func__, ep->num_req,
 543			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
 544			    short_ok ? "" : "not_",
 545			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
 546			if (usb_pipecontrol(urb->pipe)) {
 547				ep->nextpid = USB_PID_ACK;
 548				/* save the data underrun error code for later and
 549				 * proceed with the status stage
 550				 */
 551				urb->actual_length += PTD_GET_COUNT(ptd);
 552				BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 553
 554				if (urb->status == -EINPROGRESS)
 555					urb->status = cc_to_error[PTD_DATAUNDERRUN];
 556			} else {
 557				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
 558					      PTD_GET_TOGGLE(ptd));
 559				urbstat = cc_to_error[PTD_DATAUNDERRUN];
 560			}
 561			goto out;
 562		}
 563	}
 564
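	/* Transient errors are retried up to three times; a STALL or data
	 * overrun terminates the URB immediately with the mapped error code.
	 */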
 565	if (cc != PTD_CC_NOERROR) {
 566		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
 567			urbstat = cc_to_error[cc];
 568			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
 569			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
 570			    ep->error_count);
 571		}
 572		goto out;
 573	}
 574
 575	switch (ep->nextpid) {
 576	case USB_PID_OUT:
 577		if (PTD_GET_COUNT(ptd) != ep->length)
 578			pr_err("%s: count=%d len=%d\n", __func__,
 579			   PTD_GET_COUNT(ptd), ep->length);
 580		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
 581		urb->actual_length += ep->length;
 582		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 583		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
 584		if (urb->actual_length == urb->transfer_buffer_length) {
 585			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 586			    ep->num_req, len, ep->maxpacket, urbstat);
 587			if (usb_pipecontrol(urb->pipe)) {
 588				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 589				    ep->num_req,
 590				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 591				ep->nextpid = USB_PID_ACK;
 592			} else {
 593				if (len % ep->maxpacket ||
 594				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
 595					urbstat = 0;
 596					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 597					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 598					    urbstat, len, ep->maxpacket, urb->actual_length);
 599				}
 600			}
 601		}
 602		break;
 603	case USB_PID_IN:
 604		len = PTD_GET_COUNT(ptd);
 605		BUG_ON(len > ep->length);
 606		urb->actual_length += len;
 607		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
 608		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
 609		/* if transfer completed or (allowed) data underrun */
 610		if ((urb->transfer_buffer_length == urb->actual_length) ||
 611		    len % ep->maxpacket) {
 612			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
 613			    ep->num_req, len, ep->maxpacket, urbstat);
 614			if (usb_pipecontrol(urb->pipe)) {
 615				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
 616				    ep->num_req,
 617				    usb_pipein(urb->pipe) ? "IN" : "OUT");
 618				ep->nextpid = USB_PID_ACK;
 619			} else {
 620				urbstat = 0;
 621				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
 622				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
 623				    urbstat, len, ep->maxpacket, urb->actual_length);
 624			}
 625		}
 626		break;
 627	case USB_PID_SETUP:
 628		if (urb->transfer_buffer_length == urb->actual_length) {
 629			ep->nextpid = USB_PID_ACK;
 630		} else if (usb_pipeout(urb->pipe)) {
 631			usb_settoggle(udev, 0, 1, 1);
 632			ep->nextpid = USB_PID_OUT;
 633		} else {
 634			usb_settoggle(udev, 0, 0, 1);
 635			ep->nextpid = USB_PID_IN;
 636		}
 637		break;
 638	case USB_PID_ACK:
 639		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
 640		    urbstat);
 641		WARN_ON(urbstat != -EINPROGRESS);
 642		urbstat = 0;
 643		ep->nextpid = 0;
 644		break;
 645	default:
 646		BUG_ON(1);
 647	}
 648
 649 out:
 650	if (urbstat != -EINPROGRESS) {
 651		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
 652		    ep, ep->num_req, urb, urbstat);
 653		finish_request(isp1362_hcd, ep, urb, urbstat);
 654	}
 655}
 656
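/*
 * Called from the SOF interrupt once remove_ptd() has queued endpoints on
 * the remove_list: release their PTD buffers and give back any URBs that
 * are still pending on them.
 */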
 657static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
 658{
 659	struct isp1362_ep *ep;
 660	struct isp1362_ep *tmp;
 661
 662	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
 663		struct isp1362_ep_queue *epq =
 664			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
 665		int index = ep->ptd_index;
 666
 667		BUG_ON(epq == NULL);
 668		if (index >= 0) {
 669			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
 670			BUG_ON(ep->num_ptds == 0);
 671			release_ptd_buffers(epq, ep);
 672		}
 673		if (!list_empty(&ep->hep->urb_list)) {
 674			struct urb *urb = get_urb(ep);
 675
 676			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
 677			    ep->num_req, ep);
 678			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
 679		}
 680		WARN_ON(list_empty(&ep->active));
 681		if (!list_empty(&ep->active)) {
 682			list_del_init(&ep->active);
 683			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
 684		}
 685		list_del_init(&ep->remove_list);
 686		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
 687	}
 688	DBG(1, "%s: Done\n", __func__);
 689}
 690
 691static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
 692{
 693	if (count > 0) {
 694		if (count < isp1362_hcd->atl_queue.ptd_count)
 695			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
 696		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
 697		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
 698		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
 699	} else
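		/* nothing was submitted (deferred): let the SOF interrupt retry scheduling */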
 700		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
 701}
 702
 703static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 704{
 705	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
 706	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
 707	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
 708}
 709
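/*
 * Hand the just-filled ISO buffer (ISTL0 or ISTL1, selected by 'flip') to
 * the controller and enable the matching done interrupt.
 */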
 710static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
 711{
 712	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
 713	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
 714			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
 715}
 716
 717static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
 718		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
 719{
 720	int index = epq->free_ptd;
 721
 722	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
 723	index = claim_ptd_buffers(epq, ep, ep->length);
 724	if (index == -ENOMEM) {
 725		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
 726		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
 727		return index;
 728	} else if (index == -EOVERFLOW) {
 729		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
 730		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
 731		    epq->buf_map, epq->skip_map);
 732		return index;
 733	} else
 734		BUG_ON(index < 0);
 735	list_add_tail(&ep->active, &epq->active);
 736	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
 737	    ep, ep->num_req, ep->length, &epq->active);
 738	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
 739	    ep->ptd_offset, ep, ep->num_req);
 740	isp1362_write_ptd(isp1362_hcd, ep, epq);
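	/* clear the skip bit so the controller will process this PTD */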
 741	__clear_bit(ep->ptd_index, &epq->skip_map);
 742
 743	return 0;
 744}
 745
 746static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
 747{
 748	int ptd_count = 0;
 749	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
 750	struct isp1362_ep *ep;
 751	int defer = 0;
 752
 753	if (atomic_read(&epq->finishing)) {
 754		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 755		return;
 756	}
 757
 758	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
 759		struct urb *urb = get_urb(ep);
 760		int ret;
 761
 762		if (!list_empty(&ep->active)) {
 763			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
 764			continue;
 765		}
 766
 767		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
 768		    ep, ep->num_req);
 769
 770		ret = submit_req(isp1362_hcd, urb, ep, epq);
 771		if (ret == -ENOMEM) {
 772			defer = 1;
 773			break;
 774		} else if (ret == -EOVERFLOW) {
 775			defer = 1;
 776			continue;
 777		}
 778#ifdef BUGGY_PXA2XX_UDC_USBTEST
 779		defer = ep->nextpid == USB_PID_SETUP;
 780#endif
 781		ptd_count++;
 782	}
 783
  784	/* Avoid endpoint starvation: rotate the async schedule */
 785	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
 786		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
 787		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
 788	}
 789	if (ptd_count || defer)
 790		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
 791
 792	epq->ptd_count += ptd_count;
 793	if (epq->ptd_count > epq->stat_maxptds) {
 794		epq->stat_maxptds = epq->ptd_count;
 795		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
 796	}
 797}
 798
 799static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
 800{
 801	int ptd_count = 0;
 802	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
 803	struct isp1362_ep *ep;
 804
 805	if (atomic_read(&epq->finishing)) {
 806		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 807		return;
 808	}
 809
 810	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
 811		struct urb *urb = get_urb(ep);
 812		int ret;
 813
 814		if (!list_empty(&ep->active)) {
 815			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
 816			    epq->name, ep);
 817			continue;
 818		}
 819
 820		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
 821		    epq->name, ep, ep->num_req);
 822		ret = submit_req(isp1362_hcd, urb, ep, epq);
 823		if (ret == -ENOMEM)
 824			break;
 825		else if (ret == -EOVERFLOW)
 826			continue;
 827		ptd_count++;
 828	}
 829
 830	if (ptd_count) {
 831		static int last_count;
 832
 833		if (ptd_count != last_count) {
 834			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
 835			last_count = ptd_count;
 836		}
 837		enable_intl_transfers(isp1362_hcd);
 838	}
 839
 840	epq->ptd_count += ptd_count;
 841	if (epq->ptd_count > epq->stat_maxptds)
 842		epq->stat_maxptds = epq->ptd_count;
 843}
 844
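/*
 * Return the buffer offset of the next ISO PTD: round the current PTD
 * (header plus payload) up to the queue's block size, or -ENOMEM if the
 * result would no longer fit into the ISTL buffer.
 */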
 845static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
 846{
 847	u16 ptd_offset = ep->ptd_offset;
 848	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
 849
 850	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
 851	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
 852
 853	ptd_offset += num_ptds * epq->blk_size;
 854	if (ptd_offset < epq->buf_start + epq->buf_size)
 855		return ptd_offset;
 856	else
 857		return -ENOMEM;
 858}
 859
 860static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
 861{
 862	int ptd_count = 0;
 863	int flip = isp1362_hcd->istl_flip;
 864	struct isp1362_ep_queue *epq;
 865	int ptd_offset;
 866	struct isp1362_ep *ep;
 867	struct isp1362_ep *tmp;
 868	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
 869
 870 fill2:
 871	epq = &isp1362_hcd->istl_queue[flip];
 872	if (atomic_read(&epq->finishing)) {
 873		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
 874		return;
 875	}
 876
 877	if (!list_empty(&epq->active))
 878		return;
 879
 880	ptd_offset = epq->buf_start;
 881	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
 882		struct urb *urb = get_urb(ep);
 883		s16 diff = fno - (u16)urb->start_frame;
 884
 885		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
 886
 887		if (diff > urb->number_of_packets) {
 888			/* time frame for this URB has elapsed */
 889			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
 890			continue;
 891		} else if (diff < -1) {
 892			/* URB is not due in this frame or the next one.
 893			 * Comparing with '-1' instead of '0' accounts for double
 894			 * buffering in the ISP1362 which enables us to queue the PTD
 895			 * one frame ahead of time
 896			 */
 897		} else if (diff == -1) {
  898			/* submit PTDs that are due in the next frame */
 899			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
 900			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
 901			    epq->buf_start + epq->buf_size) {
 902				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
 903				    __func__, ep->length);
 904				continue;
 905			}
 906			ep->ptd_offset = ptd_offset;
 907			list_add_tail(&ep->active, &epq->active);
 908
 909			ptd_offset = next_ptd(epq, ep);
 910			if (ptd_offset < 0) {
 911				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
 912				     ep->num_req, epq->name);
 913				break;
 914			}
 915		}
 916	}
 917	list_for_each_entry(ep, &epq->active, active) {
 918		if (epq->active.next == &ep->active)
 919			ep->ptd.mps |= PTD_LAST_MSK;
 920		isp1362_write_ptd(isp1362_hcd, ep, epq);
 921		ptd_count++;
 922	}
 923
 924	if (ptd_count)
 925		enable_istl_transfers(isp1362_hcd, flip);
 926
 927	epq->ptd_count += ptd_count;
 928	if (epq->ptd_count > epq->stat_maxptds)
 929		epq->stat_maxptds = epq->ptd_count;
 930
  931	/* check whether the second ISTL buffer can also be filled */
 932	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
 933	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
 934		fno++;
 935		ptd_count = 0;
 936		flip = 1 - flip;
 937		goto fill2;
 938	}
 939}
 940
 941static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
 942			     struct isp1362_ep_queue *epq)
 943{
 944	struct isp1362_ep *ep;
 945	struct isp1362_ep *tmp;
 946
 947	if (list_empty(&epq->active)) {
 948		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 949		return;
 950	}
 951
 952	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
 953
 954	atomic_inc(&epq->finishing);
 955	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
 956		int index = ep->ptd_index;
 957
 958		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
 959		    index, ep->ptd_offset);
 960
 961		BUG_ON(index < 0);
 962		if (__test_and_clear_bit(index, &done_map)) {
 963			isp1362_read_ptd(isp1362_hcd, ep, epq);
 964			epq->free_ptd = index;
 965			BUG_ON(ep->num_ptds == 0);
 966			release_ptd_buffers(epq, ep);
 967
 968			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
 969			    ep, ep->num_req);
 970			if (!list_empty(&ep->remove_list)) {
 971				list_del_init(&ep->remove_list);
 972				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
 973			}
 974			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
 975			    ep, ep->num_req);
 976			postproc_ep(isp1362_hcd, ep);
 977		}
 978		if (!done_map)
 979			break;
 980	}
 981	if (done_map)
 982		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
 983		     epq->skip_map);
 984	atomic_dec(&epq->finishing);
 985}
 986
 987static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
 988{
 989	struct isp1362_ep *ep;
 990	struct isp1362_ep *tmp;
 991
 992	if (list_empty(&epq->active)) {
 993		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
 994		return;
 995	}
 996
 997	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
 998
 999	atomic_inc(&epq->finishing);
1000	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
1001		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
1002
1003		isp1362_read_ptd(isp1362_hcd, ep, epq);
1004		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
1005		postproc_ep(isp1362_hcd, ep);
1006	}
1007	WARN_ON(epq->blk_size != 0);
1008	atomic_dec(&epq->finishing);
1009}
1010
1011static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1012{
1013	int handled = 0;
1014	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1015	u16 irqstat;
1016	u16 svc_mask;
1017
1018	spin_lock(&isp1362_hcd->lock);
1019
1020	BUG_ON(isp1362_hcd->irq_active++);
1021
1022	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1023
1024	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1025	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1026
1027	/* only handle interrupts that are currently enabled */
1028	irqstat &= isp1362_hcd->irqenb;
1029	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1030	svc_mask = irqstat;
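	/* svc_mask tracks which pending interrupts get serviced below;
	 * anything left over is reported as unserviced at the end.
	 */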
1031
1032	if (irqstat & HCuPINT_SOF) {
1033		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1034		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1035		handled = 1;
1036		svc_mask &= ~HCuPINT_SOF;
1037		DBG(3, "%s: SOF\n", __func__);
1038		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1039		if (!list_empty(&isp1362_hcd->remove_list))
1040			finish_unlinks(isp1362_hcd);
1041		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1042			if (list_empty(&isp1362_hcd->atl_queue.active)) {
1043				start_atl_transfers(isp1362_hcd);
1044			} else {
1045				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1046				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1047						    isp1362_hcd->atl_queue.skip_map);
1048				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1049			}
1050		}
1051	}
1052
1053	if (irqstat & HCuPINT_ISTL0) {
1054		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1055		handled = 1;
1056		svc_mask &= ~HCuPINT_ISTL0;
1057		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1058		DBG(1, "%s: ISTL0\n", __func__);
1059		WARN_ON((int)!!isp1362_hcd->istl_flip);
1060		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1061			HCBUFSTAT_ISTL0_ACTIVE);
1062		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1063			HCBUFSTAT_ISTL0_DONE));
1064		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1065	}
1066
1067	if (irqstat & HCuPINT_ISTL1) {
1068		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1069		handled = 1;
1070		svc_mask &= ~HCuPINT_ISTL1;
1071		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1072		DBG(1, "%s: ISTL1\n", __func__);
1073		WARN_ON(!(int)isp1362_hcd->istl_flip);
1074		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1075			HCBUFSTAT_ISTL1_ACTIVE);
1076		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1077			HCBUFSTAT_ISTL1_DONE));
1078		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1079	}
1080
1081	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1082		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1083			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
1084		finish_iso_transfers(isp1362_hcd,
1085				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1086		start_iso_transfers(isp1362_hcd);
1087		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1088	}
1089
1090	if (irqstat & HCuPINT_INTL) {
1091		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1092		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1093		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1094
1095		DBG(2, "%s: INTL\n", __func__);
1096
1097		svc_mask &= ~HCuPINT_INTL;
1098
1099		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1100		if (~(done_map | skip_map) == 0)
1101			/* All PTDs are finished, disable INTL processing entirely */
1102			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1103
1104		handled = 1;
1105		WARN_ON(!done_map);
1106		if (done_map) {
1107			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1108			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1109			start_intl_transfers(isp1362_hcd);
1110		}
1111	}
1112
1113	if (irqstat & HCuPINT_ATL) {
1114		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1115		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1116		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1117
1118		DBG(2, "%s: ATL\n", __func__);
1119
1120		svc_mask &= ~HCuPINT_ATL;
1121
1122		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1123		if (~(done_map | skip_map) == 0)
1124			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1125		if (done_map) {
1126			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1127			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1128			start_atl_transfers(isp1362_hcd);
1129		}
1130		handled = 1;
1131	}
1132
1133	if (irqstat & HCuPINT_OPR) {
1134		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1135		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1136
1137		svc_mask &= ~HCuPINT_OPR;
1138		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1139		intstat &= isp1362_hcd->intenb;
1140		if (intstat & OHCI_INTR_UE) {
1141			pr_err("Unrecoverable error\n");
 1142			/* FIXME: do a reset or cleanup here */
1143		}
1144		if (intstat & OHCI_INTR_RHSC) {
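			/* cache root hub and port status for isp1362_hub_status_data() */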
1145			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1146			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1147			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1148		}
1149		if (intstat & OHCI_INTR_RD) {
1150			pr_info("%s: RESUME DETECTED\n", __func__);
1151			isp1362_show_reg(isp1362_hcd, HCCONTROL);
1152			usb_hcd_resume_root_hub(hcd);
1153		}
1154		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1155		irqstat &= ~HCuPINT_OPR;
1156		handled = 1;
1157	}
1158
1159	if (irqstat & HCuPINT_SUSP) {
1160		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1161		handled = 1;
1162		svc_mask &= ~HCuPINT_SUSP;
1163
1164		pr_info("%s: SUSPEND IRQ\n", __func__);
1165	}
1166
1167	if (irqstat & HCuPINT_CLKRDY) {
1168		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1169		handled = 1;
1170		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1171		svc_mask &= ~HCuPINT_CLKRDY;
1172		pr_info("%s: CLKRDY IRQ\n", __func__);
1173	}
1174
1175	if (svc_mask)
1176		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1177
1178	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1179	isp1362_hcd->irq_active--;
1180	spin_unlock(&isp1362_hcd->lock);
1181
1182	return IRQ_RETVAL(handled);
1183}
1184
1185/*-------------------------------------------------------------------------*/
1186
1187#define	MAX_PERIODIC_LOAD	900	/* out of 1000 usec */
1188static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1189{
1190	int i, branch = -ENOSPC;
1191
1192	/* search for the least loaded schedule branch of that interval
1193	 * which has enough bandwidth left unreserved.
1194	 */
1195	for (i = 0; i < interval; i++) {
1196		if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1197			int j;
1198
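			/* Reject branch i if adding the new load would push any of
			 * its slots over MAX_PERIODIC_LOAD; the inner loop then
			 * exits with j < PERIODIC_SIZE.
			 */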
1199			for (j = i; j < PERIODIC_SIZE; j += interval) {
1200				if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1201					pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1202					    load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1203					break;
1204				}
1205			}
1206			if (j < PERIODIC_SIZE)
1207				continue;
1208			branch = i;
1209		}
1210	}
1211	return branch;
1212}
1213
 1214	/* NB! ALL the code above this point runs with isp1362_hcd->lock
 1215	 * held and IRQs disabled.
 1216	 */
1217
1218/*-------------------------------------------------------------------------*/
1219
1220static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1221			       struct urb *urb,
1222			       gfp_t mem_flags)
1223{
1224	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1225	struct usb_device *udev = urb->dev;
1226	unsigned int pipe = urb->pipe;
1227	int is_out = !usb_pipein(pipe);
1228	int type = usb_pipetype(pipe);
1229	int epnum = usb_pipeendpoint(pipe);
1230	struct usb_host_endpoint *hep = urb->ep;
1231	struct isp1362_ep *ep = NULL;
1232	unsigned long flags;
1233	int retval = 0;
1234
1235	DBG(3, "%s: urb %p\n", __func__, urb);
1236
1237	if (type == PIPE_ISOCHRONOUS) {
1238		pr_err("Isochronous transfers not supported\n");
1239		return -ENOSPC;
1240	}
1241
1242	URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1243		usb_pipedevice(pipe), epnum,
1244		is_out ? "out" : "in",
1245		usb_pipecontrol(pipe) ? "ctrl" :
1246			usb_pipeint(pipe) ? "int" :
1247			usb_pipebulk(pipe) ? "bulk" :
1248			"iso",
1249		urb->transfer_buffer_length,
1250		(urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1251		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1252		"short_ok" : "");
1253
1254	/* avoid all allocations within spinlocks: request or endpoint */
1255	if (!hep->hcpriv) {
1256		ep = kzalloc(sizeof *ep, mem_flags);
1257		if (!ep)
1258			return -ENOMEM;
1259	}
1260	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1261
1262	/* don't submit to a dead or disabled port */
1263	if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1264	      USB_PORT_STAT_ENABLE) ||
1265	    !HC_IS_RUNNING(hcd->state)) {
1266		kfree(ep);
1267		retval = -ENODEV;
1268		goto fail_not_linked;
1269	}
1270
1271	retval = usb_hcd_link_urb_to_ep(hcd, urb);
1272	if (retval) {
1273		kfree(ep);
1274		goto fail_not_linked;
1275	}
1276
1277	if (hep->hcpriv) {
1278		ep = hep->hcpriv;
1279	} else {
1280		INIT_LIST_HEAD(&ep->schedule);
1281		INIT_LIST_HEAD(&ep->active);
1282		INIT_LIST_HEAD(&ep->remove_list);
1283		ep->udev = usb_get_dev(udev);
1284		ep->hep = hep;
1285		ep->epnum = epnum;
1286		ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1287		ep->ptd_offset = -EINVAL;
1288		ep->ptd_index = -EINVAL;
1289		usb_settoggle(udev, epnum, is_out, 0);
1290
1291		if (type == PIPE_CONTROL)
1292			ep->nextpid = USB_PID_SETUP;
1293		else if (is_out)
1294			ep->nextpid = USB_PID_OUT;
1295		else
1296			ep->nextpid = USB_PID_IN;
1297
1298		switch (type) {
1299		case PIPE_ISOCHRONOUS:
1300		case PIPE_INTERRUPT:
1301			if (urb->interval > PERIODIC_SIZE)
1302				urb->interval = PERIODIC_SIZE;
1303			ep->interval = urb->interval;
1304			ep->branch = PERIODIC_SIZE;
1305			ep->load = usb_calc_bus_time(udev->speed, !is_out,
1306						     (type == PIPE_ISOCHRONOUS),
1307						     usb_maxpacket(udev, pipe, is_out)) / 1000;
1308			break;
1309		}
1310		hep->hcpriv = ep;
1311	}
1312	ep->num_req = isp1362_hcd->req_serial++;
1313
1314	/* maybe put endpoint into schedule */
1315	switch (type) {
1316	case PIPE_CONTROL:
1317	case PIPE_BULK:
1318		if (list_empty(&ep->schedule)) {
1319			DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1320				__func__, ep, ep->num_req);
1321			list_add_tail(&ep->schedule, &isp1362_hcd->async);
1322		}
1323		break;
1324	case PIPE_ISOCHRONOUS:
1325	case PIPE_INTERRUPT:
1326		urb->interval = ep->interval;
1327
 1328		/* URB submitted for an already existing EP */
1329		if (ep->branch < PERIODIC_SIZE)
1330			break;
1331
1332		retval = balance(isp1362_hcd, ep->interval, ep->load);
1333		if (retval < 0) {
1334			pr_err("%s: balance returned %d\n", __func__, retval);
1335			goto fail;
1336		}
1337		ep->branch = retval;
1338		retval = 0;
1339		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1340		DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1341		    __func__, isp1362_hcd->fmindex, ep->branch,
1342		    ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1343		     ~(PERIODIC_SIZE - 1)) + ep->branch,
1344		    (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1345
1346		if (list_empty(&ep->schedule)) {
1347			if (type == PIPE_ISOCHRONOUS) {
1348				u16 frame = isp1362_hcd->fmindex;
1349
1350				frame += max_t(u16, 8, ep->interval);
1351				frame &= ~(ep->interval - 1);
1352				frame |= ep->branch;
1353				if (frame_before(frame, isp1362_hcd->fmindex))
1354					frame += ep->interval;
1355				urb->start_frame = frame;
1356
1357				DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1358				list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1359			} else {
1360				DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1361				list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1362			}
1363		} else
1364			DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1365
1366		DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1367		    ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1368		    isp1362_hcd->load[ep->branch] + ep->load);
1369		isp1362_hcd->load[ep->branch] += ep->load;
1370	}
1371
1372	urb->hcpriv = hep;
1373	ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1374
1375	switch (type) {
1376	case PIPE_CONTROL:
1377	case PIPE_BULK:
1378		start_atl_transfers(isp1362_hcd);
1379		break;
1380	case PIPE_INTERRUPT:
1381		start_intl_transfers(isp1362_hcd);
1382		break;
1383	case PIPE_ISOCHRONOUS:
1384		start_iso_transfers(isp1362_hcd);
1385		break;
1386	default:
1387		BUG();
1388	}
1389 fail:
1390	if (retval)
1391		usb_hcd_unlink_urb_from_ep(hcd, urb);
1392
1393
1394 fail_not_linked:
1395	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1396	if (retval)
1397		DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1398	return retval;
1399}
1400
1401static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1402{
1403	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1404	struct usb_host_endpoint *hep;
1405	unsigned long flags;
1406	struct isp1362_ep *ep;
1407	int retval = 0;
1408
1409	DBG(3, "%s: urb %p\n", __func__, urb);
1410
1411	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1412	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1413	if (retval)
1414		goto done;
1415
1416	hep = urb->hcpriv;
1417
1418	if (!hep) {
1419		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1420		return -EIDRM;
1421	}
1422
1423	ep = hep->hcpriv;
1424	if (ep) {
1425		/* In front of queue? */
1426		if (ep->hep->urb_list.next == &urb->urb_list) {
1427			if (!list_empty(&ep->active)) {
1428				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1429				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1430				/* disable processing and queue PTD for removal */
1431				remove_ptd(isp1362_hcd, ep);
1432				urb = NULL;
1433			}
1434		}
1435		if (urb) {
1436			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1437			    ep->num_req);
1438			finish_request(isp1362_hcd, ep, urb, status);
1439		} else
1440			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1441	} else {
1442		pr_warning("%s: No EP in URB %p\n", __func__, urb);
1443		retval = -EINVAL;
1444	}
1445done:
1446	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1447
1448	DBG(3, "%s: exit\n", __func__);
1449
1450	return retval;
1451}
1452
1453static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1454{
1455	struct isp1362_ep *ep = hep->hcpriv;
1456	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1457	unsigned long flags;
1458
1459	DBG(1, "%s: ep %p\n", __func__, ep);
1460	if (!ep)
1461		return;
1462	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1463	if (!list_empty(&hep->urb_list)) {
1464		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1465			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1466			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1467			remove_ptd(isp1362_hcd, ep);
1468			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1469		}
1470	}
1471	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1472	/* Wait for interrupt to clear out active list */
1473	while (!list_empty(&ep->active))
1474		msleep(1);
1475
1476	DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1477
1478	usb_put_dev(ep->udev);
1479	kfree(ep);
1480	hep->hcpriv = NULL;
1481}
1482
1483static int isp1362_get_frame(struct usb_hcd *hcd)
1484{
1485	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1486	u32 fmnum;
1487	unsigned long flags;
1488
1489	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1490	fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1491	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1492
1493	return (int)fmnum;
1494}
1495
1496/*-------------------------------------------------------------------------*/
1497
1498/* Adapted from ohci-hub.c */
1499static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1500{
1501	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1502	int ports, i, changed = 0;
1503	unsigned long flags;
1504
1505	if (!HC_IS_RUNNING(hcd->state))
1506		return -ESHUTDOWN;
1507
 1508	/* Report no status change now if we are scheduled to be
 1509	 * called later */
1510	if (timer_pending(&hcd->rh_timer))
1511		return 0;
1512
1513	ports = isp1362_hcd->rhdesca & RH_A_NDP;
1514	BUG_ON(ports > 2);
1515
1516	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1517	/* init status */
1518	if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1519		buf[0] = changed = 1;
1520	else
1521		buf[0] = 0;
1522
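	/* bit 0 of buf[0] reports hub-level changes, bit (i + 1) reports port i */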
1523	for (i = 0; i < ports; i++) {
1524		u32 status = isp1362_hcd->rhport[i];
1525
1526		if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1527			      RH_PS_OCIC | RH_PS_PRSC)) {
1528			changed = 1;
1529			buf[0] |= 1 << (i + 1);
1530			continue;
1531		}
1532
1533		if (!(status & RH_PS_CCS))
1534			continue;
1535	}
1536	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1537	return changed;
1538}
1539
1540static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1541				   struct usb_hub_descriptor *desc)
1542{
1543	u32 reg = isp1362_hcd->rhdesca;
1544
1545	DBG(3, "%s: enter\n", __func__);
1546
1547	desc->bDescriptorType = 0x29;
1548	desc->bDescLength = 9;
1549	desc->bHubContrCurrent = 0;
1550	desc->bNbrPorts = reg & 0x3;
1551	/* Power switching, device type, overcurrent. */
1552	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
1553	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
1554	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1555	/* ports removable, and legacy PortPwrCtrlMask */
1556	desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1557	desc->u.hs.DeviceRemovable[1] = ~0;
1558
1559	DBG(3, "%s: exit\n", __func__);
1560}
1561
1562/* Adapted from ohci-hub.c */
1563static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1564			       u16 wIndex, char *buf, u16 wLength)
1565{
1566	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1567	int retval = 0;
1568	unsigned long flags;
1569	unsigned long t1;
1570	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1571	u32 tmp = 0;
1572
1573	switch (typeReq) {
1574	case ClearHubFeature:
1575		DBG(0, "ClearHubFeature: ");
1576		switch (wValue) {
1577		case C_HUB_OVER_CURRENT:
1578			_DBG(0, "C_HUB_OVER_CURRENT\n");
1579			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1580			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1581			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
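			/* fall through - C_HUB_LOCAL_POWER below only emits a debug message */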
1582		case C_HUB_LOCAL_POWER:
1583			_DBG(0, "C_HUB_LOCAL_POWER\n");
1584			break;
1585		default:
1586			goto error;
1587		}
1588		break;
1589	case SetHubFeature:
1590		DBG(0, "SetHubFeature: ");
1591		switch (wValue) {
1592		case C_HUB_OVER_CURRENT:
1593		case C_HUB_LOCAL_POWER:
1594			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1595			break;
1596		default:
1597			goto error;
1598		}
1599		break;
1600	case GetHubDescriptor:
1601		DBG(0, "GetHubDescriptor\n");
1602		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1603		break;
1604	case GetHubStatus:
1605		DBG(0, "GetHubStatus\n");
1606		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1607		break;
1608	case GetPortStatus:
1609#ifndef VERBOSE
1610		DBG(0, "GetPortStatus\n");
1611#endif
1612		if (!wIndex || wIndex > ports)
1613			goto error;
1614		tmp = isp1362_hcd->rhport[--wIndex];
1615		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1616		break;
1617	case ClearPortFeature:
1618		DBG(0, "ClearPortFeature: ");
1619		if (!wIndex || wIndex > ports)
1620			goto error;
1621		wIndex--;
1622
1623		switch (wValue) {
1624		case USB_PORT_FEAT_ENABLE:
1625			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
1626			tmp = RH_PS_CCS;
1627			break;
1628		case USB_PORT_FEAT_C_ENABLE:
1629			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1630			tmp = RH_PS_PESC;
1631			break;
1632		case USB_PORT_FEAT_SUSPEND:
1633			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1634			tmp = RH_PS_POCI;
1635			break;
1636		case USB_PORT_FEAT_C_SUSPEND:
1637			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1638			tmp = RH_PS_PSSC;
1639			break;
1640		case USB_PORT_FEAT_POWER:
1641			_DBG(0, "USB_PORT_FEAT_POWER\n");
1642			tmp = RH_PS_LSDA;
1643
1644			break;
1645		case USB_PORT_FEAT_C_CONNECTION:
1646			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1647			tmp = RH_PS_CSC;
1648			break;
1649		case USB_PORT_FEAT_C_OVER_CURRENT:
1650			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1651			tmp = RH_PS_OCIC;
1652			break;
1653		case USB_PORT_FEAT_C_RESET:
1654			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
1655			tmp = RH_PS_PRSC;
1656			break;
1657		default:
1658			goto error;
1659		}
1660
1661		spin_lock_irqsave(&isp1362_hcd->lock, flags);
1662		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1663		isp1362_hcd->rhport[wIndex] =
1664			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1665		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1666		break;
1667	case SetPortFeature:
1668		DBG(0, "SetPortFeature: ");
1669		if (!wIndex || wIndex > ports)
1670			goto error;
1671		wIndex--;
1672		switch (wValue) {
1673		case USB_PORT_FEAT_SUSPEND:
1674			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1675			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1676			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1677			isp1362_hcd->rhport[wIndex] =
1678				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1679			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1680			break;
1681		case USB_PORT_FEAT_POWER:
1682			_DBG(0, "USB_PORT_FEAT_POWER\n");
1683			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1684			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1685			isp1362_hcd->rhport[wIndex] =
1686				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1687			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1688			break;
1689		case USB_PORT_FEAT_RESET:
1690			_DBG(0, "USB_PORT_FEAT_RESET\n");
1691			spin_lock_irqsave(&isp1362_hcd->lock, flags);
1692
1693			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1694			while (time_before(jiffies, t1)) {
1695				/* spin until any current reset finishes */
1696				for (;;) {
1697					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1698					if (!(tmp & RH_PS_PRS))
1699						break;
1700					udelay(500);
1701				}
1702				if (!(tmp & RH_PS_CCS))
1703					break;
 1704				/* Reset lasts 10 ms (according to the datasheet) */
1705				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1706
1707				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1708				msleep(10);
1709				spin_lock_irqsave(&isp1362_hcd->lock, flags);
1710			}
1711
1712			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1713									 HCRHPORT1 + wIndex);
1714			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1715			break;
1716		default:
1717			goto error;
1718		}
1719		break;
1720
1721	default:
1722 error:
1723		/* "protocol stall" on error */
1724		_DBG(0, "PROTOCOL STALL\n");
1725		retval = -EPIPE;
1726	}
1727
1728	return retval;
1729}
1730
1731#ifdef	CONFIG_PM
1732static int isp1362_bus_suspend(struct usb_hcd *hcd)
1733{
1734	int status = 0;
1735	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1736	unsigned long flags;
1737
1738	if (time_before(jiffies, isp1362_hcd->next_statechange))
1739		msleep(5);
1740
1741	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1742
1743	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1744	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1745	case OHCI_USB_RESUME:
1746		DBG(0, "%s: resume/suspend?\n", __func__);
1747		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1748		isp1362_hcd->hc_control |= OHCI_USB_RESET;
1749		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1750		/* FALL THROUGH */
1751	case OHCI_USB_RESET:
1752		status = -EBUSY;
1753		pr_warning("%s: needs reinit!\n", __func__);
1754		goto done;
1755	case OHCI_USB_SUSPEND:
1756		pr_warning("%s: already suspended?\n", __func__);
1757		goto done;
1758	}
1759	DBG(0, "%s: suspend root hub\n", __func__);
1760
1761	/* First stop any processing */
1762	hcd->state = HC_STATE_QUIESCING;
1763	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1764	    !list_empty(&isp1362_hcd->intl_queue.active) ||
 1765	    !list_empty(&isp1362_hcd->istl_queue[0].active) ||
 1766	    !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1767		int limit;
1768
1769		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1770		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1771		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1772		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1773		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1774
1775		DBG(0, "%s: stopping schedules ...\n", __func__);
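		/* wait up to 2 ms for the next start of frame before flushing the queues */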
1776		limit = 2000;
1777		while (limit > 0) {
1778			udelay(250);
1779			limit -= 250;
1780			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1781				break;
1782		}
1783		mdelay(7);
1784		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1785			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1786			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1787		}
1788		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1789			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1790			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1791		}
1792		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1793			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1794		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1795			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1796	}
1797	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1798		    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1799	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1800			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1801
1802	/* Suspend hub */
1803	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1804	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1805	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1806	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1807
1808#if 1
1809	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1810	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1811		pr_err("%s: controller won't suspend %08x\n", __func__,
1812		    isp1362_hcd->hc_control);
1813		status = -EBUSY;
1814	} else
1815#endif
1816	{
1817		/* no resumes until devices finish suspending */
1818		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1819	}
1820done:
1821	if (status == 0) {
1822		hcd->state = HC_STATE_SUSPENDED;
1823		DBG(0, "%s: HCD suspended: %08x\n", __func__,
1824		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1825	}
1826	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1827	return status;
1828}
1829
1830static int isp1362_bus_resume(struct usb_hcd *hcd)
1831{
1832	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1833	u32 port;
1834	unsigned long flags;
1835	int status = -EINPROGRESS;
1836
1837	if (time_before(jiffies, isp1362_hcd->next_statechange))
1838		msleep(5);
1839
1840	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1841	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1842	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1843	if (hcd->state == HC_STATE_RESUMING) {
1844		pr_warning("%s: duplicate resume\n", __func__);
1845		status = 0;
1846	} else
1847		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1848		case OHCI_USB_SUSPEND:
1849			DBG(0, "%s: resume root hub\n", __func__);
1850			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1851			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1852			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1853			break;
1854		case OHCI_USB_RESUME:
1855			/* HCFS changes sometime after INTR_RD */
1856			DBG(0, "%s: remote wakeup\n", __func__);
1857			break;
1858		case OHCI_USB_OPER:
1859			DBG(0, "%s: odd resume\n", __func__);
1860			status = 0;
1861			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1862			break;
1863		default:		/* RESET, we lost power */
1864			DBG(0, "%s: root hub hardware reset\n", __func__);
1865			status = -EBUSY;
1866		}
1867	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1868	if (status == -EBUSY) {
1869		DBG(0, "%s: Restarting HC\n", __func__);
1870		isp1362_hc_stop(hcd);
1871		return isp1362_hc_start(hcd);
1872	}
1873	if (status != -EINPROGRESS)
1874		return status;
1875	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1876	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1877	while (port--) {
1878		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1879
1880		/* force global, not selective, resume */
1881		if (!(stat & RH_PS_PSS)) {
1882			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1883			continue;
1884		}
1885		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1886		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1887	}
1888	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1889
 1890	/* Some controllers (Lucent) need extra-long delays */
1891	hcd->state = HC_STATE_RESUMING;
1892	mdelay(20 /* usb 11.5.1.10 */ + 15);
1893
1894	isp1362_hcd->hc_control = OHCI_USB_OPER;
1895	spin_lock_irqsave(&isp1362_hcd->lock, flags);
1896	isp1362_show_reg(isp1362_hcd, HCCONTROL);
1897	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1898	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1899	/* TRSMRCY */
1900	msleep(10);
1901
1902	/* keep it alive for ~5x suspend + resume costs */
1903	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1904
1905	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1906	hcd->state = HC_STATE_RUNNING;
1907	return 0;
1908}
1909#else
1910#define	isp1362_bus_suspend	NULL
1911#define	isp1362_bus_resume	NULL
1912#endif
1913
1914/*-------------------------------------------------------------------------*/
1915
1916#ifdef STUB_DEBUG_FILE
1917
1918static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
1919{
1920}
1921static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
1922{
1923}
1924
1925#else
1926
1927#include <linux/proc_fs.h>
1928#include <linux/seq_file.h>
1929
1930static void dump_irq(struct seq_file *s, char *label, u16 mask)
1931{
1932	seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1933		   mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1934		   mask & HCuPINT_SUSP ? " susp" : "",
1935		   mask & HCuPINT_OPR ? " opr" : "",
1936		   mask & HCuPINT_EOT ? " eot" : "",
1937		   mask & HCuPINT_ATL ? " atl" : "",
1938		   mask & HCuPINT_SOF ? " sof" : "");
1939}
1940
1941static void dump_int(struct seq_file *s, char *label, u32 mask)
1942{
1943	seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1944		   mask & OHCI_INTR_MIE ? " MIE" : "",
1945		   mask & OHCI_INTR_RHSC ? " rhsc" : "",
1946		   mask & OHCI_INTR_FNO ? " fno" : "",
1947		   mask & OHCI_INTR_UE ? " ue" : "",
1948		   mask & OHCI_INTR_RD ? " rd" : "",
1949		   mask & OHCI_INTR_SF ? " sof" : "",
1950		   mask & OHCI_INTR_SO ? " so" : "");
1951}
1952
1953static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1954{
1955	seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1956		   mask & OHCI_CTRL_RWC ? " rwc" : "",
1957		   mask & OHCI_CTRL_RWE ? " rwe" : "",
1958		   ({
1959			   char *hcfs;
1960			   switch (mask & OHCI_CTRL_HCFS) {
1961			   case OHCI_USB_OPER:
1962				   hcfs = " oper";
1963				   break;
1964			   case OHCI_USB_RESET:
1965				   hcfs = " reset";
1966				   break;
1967			   case OHCI_USB_RESUME:
1968				   hcfs = " resume";
1969				   break;
1970			   case OHCI_USB_SUSPEND:
1971				   hcfs = " suspend";
1972				   break;
1973			   default:
1974				   hcfs = " ?";
1975			   }
1976			   hcfs;
1977		   }));
1978}
1979
1980static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1981{
1982	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1983		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
1984	seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1985		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1986	seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1987		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1988	seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1989		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1990	seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1991		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
1992	seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1993		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1994	seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1995		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
1996	seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
1997		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
1998	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
1999		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
2000	seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
2001		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
2002	seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
2003		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
2004	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
2005		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
2006	seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
2007		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
2008	seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
2009		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
2010	seq_printf(s, "\n");
2011	seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
2012		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2013	seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2014		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2015	seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2016		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2017	seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2018		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
2019	seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2020		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2021	seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2022		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2023	seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2024		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2025	seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2026		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2027	seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2028		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2029#if 0
2030	seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
2031		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2032#endif
2033	seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2034		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2035	seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2036		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2037	seq_printf(s, "\n");
2038	seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2039		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2040	seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2041		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2042	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2043		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2044	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2045		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2046	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2047		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2048	seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2049		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2050	seq_printf(s, "\n");
2051	seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2052		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2053	seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2054		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2055#if 0
2056	seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2057		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2058#endif
2059	seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2060		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2061	seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2062		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2063	seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2064		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2065	seq_printf(s, "\n");
2066	seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2067		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2068	seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2069		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2070}
2071
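/*
 * Debug dump for /proc/driver/isp1362: prints buffer alignment statistics,
 * the high-water marks of the ATL/INTL/ISTL PTD fifos, per-source interrupt
 * counters, a full register dump and the URBs queued on the async, periodic
 * and isochronous schedules.
 */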
2072static int proc_isp1362_show(struct seq_file *s, void *unused)
2073{
2074	struct isp1362_hcd *isp1362_hcd = s->private;
2075	struct isp1362_ep *ep;
2076	int i;
2077
2078	seq_printf(s, "%s\n%s version %s\n",
2079		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2080
2081	/* collect statistics to help estimate potential win for
2082	 * DMA engines that care about alignment (PXA)
2083	 */
2084	seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2085		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2086		   isp1362_hcd->stat2, isp1362_hcd->stat1);
2087	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2088	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2089	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2090		   max(isp1362_hcd->istl_queue[0].stat_maxptds,
2091		       isp1362_hcd->istl_queue[1].stat_maxptds));
2092
2093	/* FIXME: don't show the following in suspended state */
2094	spin_lock_irq(&isp1362_hcd->lock);
2095
2096	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2097	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2098	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2099	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2100	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2101
2102	for (i = 0; i < NUM_ISP1362_IRQS; i++)
2103		if (isp1362_hcd->irq_stat[i])
2104			seq_printf(s, "%-15s: %d\n",
2105				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2106
2107	dump_regs(s, isp1362_hcd);
2108	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2109		struct urb *urb;
2110
2111		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2112			   ({
2113				   char *s;
2114				   switch (ep->nextpid) {
2115				   case USB_PID_IN:
2116					   s = "in";
2117					   break;
2118				   case USB_PID_OUT:
2119					   s = "out";
2120					   break;
2121				   case USB_PID_SETUP:
2122					   s = "setup";
2123					   break;
2124				   case USB_PID_ACK:
2125					   s = "status";
2126					   break;
2127				   default:
2128					   s = "?";
2129					   break;
2130				   }
2131				   s; }), ep->maxpacket);
2132		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2133			seq_printf(s, "  urb%p, %d/%d\n", urb,
2134				   urb->actual_length,
2135				   urb->transfer_buffer_length);
2136		}
2137	}
2138	if (!list_empty(&isp1362_hcd->async))
2139		seq_printf(s, "\n");
2140	dump_ptd_queue(&isp1362_hcd->atl_queue);
2141
2142	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2143
2144	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2145		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2146			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2147
2148		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2149			   ep->interval, ep,
2150			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2151			   ep->udev->devnum, ep->epnum,
2152			   (ep->epnum == 0) ? "" :
2153			   ((ep->nextpid == USB_PID_IN) ?
2154			    "in" : "out"), ep->maxpacket);
2155	}
2156	dump_ptd_queue(&isp1362_hcd->intl_queue);
2157
2158	seq_printf(s, "ISO:\n");
2159
2160	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2161		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2162			   ep->interval, ep,
2163			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2164			   ep->udev->devnum, ep->epnum,
2165			   (ep->epnum == 0) ? "" :
2166			   ((ep->nextpid == USB_PID_IN) ?
2167			    "in" : "out"), ep->maxpacket);
2168	}
2169
2170	spin_unlock_irq(&isp1362_hcd->lock);
2171	seq_printf(s, "\n");
2172
2173	return 0;
2174}
2175
2176static int proc_isp1362_open(struct inode *inode, struct file *file)
2177{
2178	return single_open(file, proc_isp1362_show, PDE(inode)->data);
2179}
2180
2181static const struct file_operations proc_ops = {
2182	.open = proc_isp1362_open,
2183	.read = seq_read,
2184	.llseek = seq_lseek,
2185	.release = single_release,
2186};
2187
2188/* expect just one isp1362_hcd per system */
2189static const char proc_filename[] = "driver/isp1362";
2190
2191static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2192{
2193	struct proc_dir_entry *pde;
2194
2195	pde = create_proc_entry(proc_filename, 0, NULL);
2196	if (pde == NULL) {
2197		pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2198		return;
2199	}
2200
2201	pde->proc_fops = &proc_ops;
2202	pde->data = isp1362_hcd;
2203	isp1362_hcd->pde = pde;
2204}
2205
2206static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2207{
2208	if (isp1362_hcd->pde)
2209		remove_proc_entry(proc_filename, NULL);
2210}
2211
2212#endif
2213
2214/*-------------------------------------------------------------------------*/
2215
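/*
 * Software reset: unlock the reset logic by writing HCSWRES_MAGIC to the
 * HCSWRES register, request a host controller reset via OHCI_HCR in
 * HCCMDSTAT and poll (roughly 20ms at most) until the chip clears the bit.
 */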
2216static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2217{
2218	int tmp = 20;
2219
2220	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2221	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2222	while (--tmp) {
2223		mdelay(1);
2224		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2225			break;
2226	}
2227	if (!tmp)
2228		pr_err("Software reset timeout\n");
2229}
2230
2231static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2232{
2233	unsigned long flags;
2234
2235	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2236	__isp1362_sw_reset(isp1362_hcd);
2237	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2238}
2239
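/*
 * Partition the ISP1362_BUF_SIZE bytes of internal buffer memory into the
 * three transfer areas (layout sketch, assuming the default buffer/block
 * size constants from isp1362.h):
 *
 *   [0 .. istl_size)                    two ISTL buffers of istl_size/2,
 *                                       used alternately for iso transfers
 *   [istl_size .. istl_size+intl_size)  ISP1362_INTL_BUFFERS blocks of
 *                                       intl_blksize for interrupt PTDs
 *   [istl_size+intl_size .. end)        atl_buffers blocks of atl_blksize
 *                                       for control/bulk (ATL) PTDs
 *
 * atl_buffers = (ISP1362_BUF_SIZE - istl_size - intl_size) / atl_blksize,
 * capped at 32 because the skip/last/done maps are 32-bit bitmaps.
 */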
2240static int isp1362_mem_config(struct usb_hcd *hcd)
2241{
2242	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2243	unsigned long flags;
2244	u32 total;
2245	u16 istl_size = ISP1362_ISTL_BUFSIZE;
2246	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2247	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2248	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2249	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2250	u16 atl_size;
2251	int i;
2252
2253	WARN_ON(istl_size & 3);
2254	WARN_ON(atl_blksize & 3);
2255	WARN_ON(intl_blksize & 3);
2256	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2257	WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2258
2259	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2260	if (atl_buffers > 32)
2261		atl_buffers = 32;
2262	atl_size = atl_buffers * atl_blksize;
2263	total = atl_size + intl_size + istl_size;
2264	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2265	dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2266		 istl_size / 2, istl_size, 0, istl_size / 2);
2267	dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2268		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2269		 intl_size, istl_size);
2270	dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2271		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2272		 atl_size, istl_size + intl_size);
2273	dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2274		 ISP1362_BUF_SIZE - total);
2275
2276	if (total > ISP1362_BUF_SIZE) {
2277		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2278			__func__, total, ISP1362_BUF_SIZE);
2279		return -ENOMEM;
2280	}
2281
2282	total = istl_size + intl_size + atl_size;
2283	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2284
2285	for (i = 0; i < 2; i++) {
2286		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2287		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2288		isp1362_hcd->istl_queue[i].blk_size = 4;
2289		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2290		snprintf(isp1362_hcd->istl_queue[i].name,
2291			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2292		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2293		     isp1362_hcd->istl_queue[i].name,
2294		     isp1362_hcd->istl_queue[i].buf_start,
2295		     isp1362_hcd->istl_queue[i].buf_size);
2296	}
2297	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2298
2299	isp1362_hcd->intl_queue.buf_start = istl_size;
2300	isp1362_hcd->intl_queue.buf_size = intl_size;
2301	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2302	isp1362_hcd->intl_queue.blk_size = intl_blksize;
2303	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2304	isp1362_hcd->intl_queue.skip_map = ~0;
2305	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2306
2307	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2308			    isp1362_hcd->intl_queue.buf_size);
2309	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2310			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2311	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2312	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2313			    1 << (ISP1362_INTL_BUFFERS - 1));
2314
2315	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2316	isp1362_hcd->atl_queue.buf_size = atl_size;
2317	isp1362_hcd->atl_queue.buf_count = atl_buffers;
2318	isp1362_hcd->atl_queue.blk_size = atl_blksize;
2319	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2320	isp1362_hcd->atl_queue.skip_map = ~0;
2321	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2322
2323	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2324			    isp1362_hcd->atl_queue.buf_size);
2325	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2326			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2327	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2328	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2329			    1 << (atl_buffers - 1));
2330
2331	snprintf(isp1362_hcd->atl_queue.name,
2332		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2333	snprintf(isp1362_hcd->intl_queue.name,
2334		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2335	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2336	     isp1362_hcd->intl_queue.name,
2337	     isp1362_hcd->intl_queue.buf_start,
2338	     ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2339	     isp1362_hcd->intl_queue.buf_size);
2340	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2341	     isp1362_hcd->atl_queue.name,
2342	     isp1362_hcd->atl_queue.buf_start,
2343	     atl_buffers, isp1362_hcd->atl_queue.blk_size,
2344	     isp1362_hcd->atl_queue.buf_size);
2345
2346	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2347
2348	return 0;
2349}
2350
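/*
 * Reset the controller, preferring the board-supplied reset/clock hooks
 * over the register-level software reset, then wait up to 100ms (polling
 * every 4ms) for HCuPINT_CLKRDY to signal that the chip's clock is stable.
 */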
2351static int isp1362_hc_reset(struct usb_hcd *hcd)
2352{
2353	int ret = 0;
2354	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2355	unsigned long t;
2356	unsigned long timeout = 100;
2357	unsigned long flags;
2358	int clkrdy = 0;
2359
2360	pr_debug("%s:\n", __func__);
2361
2362	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2363		isp1362_hcd->board->reset(hcd->self.controller, 1);
2364		msleep(20);
2365		if (isp1362_hcd->board->clock)
2366			isp1362_hcd->board->clock(hcd->self.controller, 1);
2367		isp1362_hcd->board->reset(hcd->self.controller, 0);
2368	} else
2369		isp1362_sw_reset(isp1362_hcd);
2370
2371	/* chip has been reset. First we need to see a clock */
2372	t = jiffies + msecs_to_jiffies(timeout);
2373	while (!clkrdy && time_before_eq(jiffies, t)) {
2374		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2375		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2376		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2377		if (!clkrdy)
2378			msleep(4);
2379	}
2380
2381	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2382	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2383	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2384	if (!clkrdy) {
2385		pr_err("Clock not ready after %lums\n", timeout);
2386		ret = -ENODEV;
2387	}
2388	return ret;
2389}
2390
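/*
 * Stop the host controller: mask all chip interrupts, switch off global
 * port power via the root hub registers and put the chip back into reset
 * (board hook if available, software reset otherwise).
 */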
2391static void isp1362_hc_stop(struct usb_hcd *hcd)
2392{
2393	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2394	unsigned long flags;
2395	u32 tmp;
2396
2397	pr_debug("%s:\n", __func__);
2398
2399	del_timer_sync(&hcd->rh_timer);
2400
2401	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2402
2403	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2404
2405	/* Switch off power for all ports */
2406	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2407	tmp &= ~(RH_A_NPS | RH_A_PSM);
2408	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2409	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2410
2411	/* Reset the chip */
2412	if (isp1362_hcd->board && isp1362_hcd->board->reset)
2413		isp1362_hcd->board->reset(hcd->self.controller, 1);
2414	else
2415		__isp1362_sw_reset(isp1362_hcd);
2416
2417	if (isp1362_hcd->board && isp1362_hcd->board->clock)
2418		isp1362_hcd->board->clock(hcd->self.controller, 0);
2419
2420	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2421}
2422
2423#ifdef CHIP_BUFFER_TEST
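/*
 * Optional sanity test of the chip's buffer memory, mainly useful for
 * verifying the platform's bus timing: write and read back patterns at
 * various byte offsets and transfer sizes and complain on any mismatch.
 */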
2424static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2425{
2426	int ret = 0;
2427	u16 *ref;
2428	unsigned long flags;
2429
2430	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2431	if (ref) {
2432		int offset;
2433		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2434
2435		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2436			ref[offset] = ~offset;
2437			tst[offset] = offset;
2438		}
2439
2440		for (offset = 0; offset < 4; offset++) {
2441			int j;
2442
2443			for (j = 0; j < 8; j++) {
2444				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2445				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2446				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2447				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2448
2449				if (memcmp(ref, tst, j)) {
2450					ret = -ENODEV;
2451					pr_err("%s: memory check with %d byte offset %d failed\n",
2452					    __func__, j, offset);
2453					dump_data((u8 *)ref + offset, j);
2454					dump_data((u8 *)tst + offset, j);
2455				}
2456			}
2457		}
2458
2459		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2460		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2461		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2462		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2463
2464		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2465			ret = -ENODEV;
2466			pr_err("%s: memory check failed\n", __func__);
2467			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2468		}
2469
2470		for (offset = 0; offset < 256; offset++) {
2471			int test_size = 0;
2472
2473			yield();
2474
2475			memset(tst, 0, ISP1362_BUF_SIZE);
2476			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2477			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2478			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2479			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2480			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2481				   ISP1362_BUF_SIZE / 2)) {
2482				pr_err("%s: Failed to clear buffer\n", __func__);
2483				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2484				break;
2485			}
2486			spin_lock_irqsave(&isp1362_hcd->lock, flags);
2487			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2488			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2489					     offset * 2 + PTD_HEADER_SIZE, test_size);
2490			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2491					    PTD_HEADER_SIZE + test_size);
2492			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2493			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2494				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
2495				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2496				spin_lock_irqsave(&isp1362_hcd->lock, flags);
2497				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2498						    PTD_HEADER_SIZE + test_size);
2499				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2500				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2501					ret = -ENODEV;
2502					pr_err("%s: memory check with offset %02x failed\n",
2503					    __func__, offset);
2504					break;
2505				}
2506				pr_warning("%s: memory check with offset %02x ok after second read\n",
2507				     __func__, offset);
2508			}
2509		}
2510		kfree(ref);
2511	}
2512	return ret;
2513}
2514#endif
2515
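/*
 * Bring the controller up: verify HCCHIPID, program HCHWCFG from the
 * platform data (interrupt/DREQ/DACK polarity, pull-downs, clock and
 * overcurrent options), partition the buffer memory, configure the root
 * hub descriptors, enable the interrupt sources this driver handles and
 * finally switch the OHCI state machine to operational with global port
 * power enabled.
 */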
2516static int isp1362_hc_start(struct usb_hcd *hcd)
2517{
2518	int ret;
2519	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2520	struct isp1362_platform_data *board = isp1362_hcd->board;
2521	u16 hwcfg;
2522	u16 chipid;
2523	unsigned long flags;
2524
2525	pr_debug("%s:\n", __func__);
2526
2527	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2528	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2529	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2530
2531	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2532		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2533		return -ENODEV;
2534	}
2535
2536#ifdef CHIP_BUFFER_TEST
2537	ret = isp1362_chip_test(isp1362_hcd);
2538	if (ret)
2539		return -ENODEV;
2540#endif
2541	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2542	/* clear interrupt status and disable all interrupt sources */
2543	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2544	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2545
2546	/* HW conf */
2547	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2548	if (board->sel15Kres)
2549		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2550			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2551	if (board->clknotstop)
2552		hwcfg |= HCHWCFG_CLKNOTSTOP;
2553	if (board->oc_enable)
2554		hwcfg |= HCHWCFG_ANALOG_OC;
2555	if (board->int_act_high)
2556		hwcfg |= HCHWCFG_INT_POL;
2557	if (board->int_edge_triggered)
2558		hwcfg |= HCHWCFG_INT_TRIGGER;
2559	if (board->dreq_act_high)
2560		hwcfg |= HCHWCFG_DREQ_POL;
2561	if (board->dack_act_high)
2562		hwcfg |= HCHWCFG_DACK_POL;
2563	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2564	isp1362_show_reg(isp1362_hcd, HCHWCFG);
2565	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2566	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2567
2568	ret = isp1362_mem_config(hcd);
2569	if (ret)
2570		return ret;
2571
2572	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2573
2574	/* Root hub conf */
2575	isp1362_hcd->rhdesca = 0;
2576	if (board->no_power_switching)
2577		isp1362_hcd->rhdesca |= RH_A_NPS;
2578	if (board->power_switching_mode)
2579		isp1362_hcd->rhdesca |= RH_A_PSM;
2580	if (board->potpg)
2581		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2582	else
2583		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2584
2585	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2586	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2587	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2588
2589	isp1362_hcd->rhdescb = RH_B_PPCM;
2590	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2591	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2592
2593	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2594	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2595	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2596
2597	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2598
2599	isp1362_hcd->hc_control = OHCI_USB_OPER;
2600	hcd->state = HC_STATE_RUNNING;
2601
2602	spin_lock_irqsave(&isp1362_hcd->lock, flags);
2603	/* Set up interrupts */
2604	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2605	isp1362_hcd->intenb |= OHCI_INTR_RD;
2606	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2607	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2608	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2609
2610	/* Go operational */
2611	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2612	/* enable global power */
2613	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2614
2615	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2616
2617	return 0;
2618}
2619
2620/*-------------------------------------------------------------------------*/
2621
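/*
 * hc_driver glue: the USB core drives the ISP1362 through these callbacks;
 * HCD_USB11 marks this as a full/low-speed (USB 1.1) host controller.
 */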
2622static struct hc_driver isp1362_hc_driver = {
2623	.description =		hcd_name,
2624	.product_desc =		"ISP1362 Host Controller",
2625	.hcd_priv_size =	sizeof(struct isp1362_hcd),
2626
2627	.irq =			isp1362_irq,
2628	.flags =		HCD_USB11 | HCD_MEMORY,
2629
2630	.reset =		isp1362_hc_reset,
2631	.start =		isp1362_hc_start,
2632	.stop =			isp1362_hc_stop,
2633
2634	.urb_enqueue =		isp1362_urb_enqueue,
2635	.urb_dequeue =		isp1362_urb_dequeue,
2636	.endpoint_disable =	isp1362_endpoint_disable,
2637
2638	.get_frame_number =	isp1362_get_frame,
2639
2640	.hub_status_data =	isp1362_hub_status_data,
2641	.hub_control =		isp1362_hub_control,
2642	.bus_suspend =		isp1362_bus_suspend,
2643	.bus_resume =		isp1362_bus_resume,
2644};
2645
2646/*-------------------------------------------------------------------------*/
2647
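/* Tear down in the reverse order of probe: debug file, HCD, mappings, regions. */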
2648static int __devexit isp1362_remove(struct platform_device *pdev)
2649{
2650	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2651	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2652	struct resource *res;
2653
2654	remove_debug_file(isp1362_hcd);
2655	DBG(0, "%s: Removing HCD\n", __func__);
2656	usb_remove_hcd(hcd);
2657
2658	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2659	    isp1362_hcd->data_reg);
2660	iounmap(isp1362_hcd->data_reg);
2661
2662	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2663	    isp1362_hcd->addr_reg);
2664	iounmap(isp1362_hcd->addr_reg);
2665
2666	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2667	if (res) {
2668		DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2669		release_mem_region(res->start, resource_size(res));
	}
2670
2671	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2672	if (res) {
2673		DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2674		release_mem_region(res->start, resource_size(res));
	}
2675
2676	DBG(0, "%s: put_hcd\n", __func__);
2677	usb_put_hcd(hcd);
2678	DBG(0, "%s: Done\n", __func__);
2679
2680	return 0;
2681}
2682
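/*
 * Probe expects two memory resources (index 0: data port, index 1: address
 * port) and one IRQ resource from the board file; it maps both ports,
 * creates and registers the HCD and hooks up the debug file.
 */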
2683static int __devinit isp1362_probe(struct platform_device *pdev)
2684{
2685	struct usb_hcd *hcd;
2686	struct isp1362_hcd *isp1362_hcd;
2687	struct resource *addr, *data;
2688	void __iomem *addr_reg;
2689	void __iomem *data_reg;
2690	int irq;
2691	int retval = 0;
2692	struct resource *irq_res;
2693	unsigned int irq_flags = 0;
2694
2695	if (usb_disabled())
2696		return -ENODEV;
2697
2698	/* basic sanity checks first.  board-specific init logic should
2699	 * have initialized these three resources and probably board
2700	 * specific platform_data.  we don't probe for IRQs, and do only
2701	 * minimal sanity checking.
2702	 */
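	/*
	 * Hypothetical board file sketch (addresses and IRQ number are made
	 * up; only the ordering matters: mem[0] is the data port, mem[1] the
	 * address port, plus one IRQ):
	 *
	 *	static struct resource isp1362_resources[] = {
	 *		{ .start = 0x08000000, .end = 0x08000001,
	 *		  .flags = IORESOURCE_MEM },
	 *		{ .start = 0x08000002, .end = 0x08000003,
	 *		  .flags = IORESOURCE_MEM },
	 *		{ .start = 64, .end = 64,
	 *		  .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL },
	 *	};
	 */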
2703	if (pdev->num_resources < 3) {
2704		retval = -ENODEV;
2705		goto err1;
2706	}
2707
2708	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2709	addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2710	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2711	if (!addr || !data || !irq_res) {
2712		retval = -ENODEV;
2713		goto err1;
2714	}
2715	irq = irq_res->start;
2716
2717	if (pdev->dev.dma_mask) {
2718		DBG(1, "won't do DMA");
2719		retval = -ENODEV;
2720		goto err1;
2721	}
2722
2723	if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
2724		retval = -EBUSY;
2725		goto err1;
2726	}
2727	addr_reg = ioremap(addr->start, resource_size(addr));
2728	if (addr_reg == NULL) {
2729		retval = -ENOMEM;
2730		goto err2;
2731	}
2732
2733	if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
2734		retval = -EBUSY;
2735		goto err3;
2736	}
2737	data_reg = ioremap(data->start, resource_size(data));
2738	if (data_reg == NULL) {
2739		retval = -ENOMEM;
2740		goto err4;
2741	}
2742
2743	/* allocate and initialize hcd */
2744	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2745	if (!hcd) {
2746		retval = -ENOMEM;
2747		goto err5;
2748	}
2749	hcd->rsrc_start = data->start;
2750	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2751	isp1362_hcd->data_reg = data_reg;
2752	isp1362_hcd->addr_reg = addr_reg;
2753
2754	isp1362_hcd->next_statechange = jiffies;
2755	spin_lock_init(&isp1362_hcd->lock);
2756	INIT_LIST_HEAD(&isp1362_hcd->async);
2757	INIT_LIST_HEAD(&isp1362_hcd->periodic);
2758	INIT_LIST_HEAD(&isp1362_hcd->isoc);
2759	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2760	isp1362_hcd->board = pdev->dev.platform_data;
2761#if USE_PLATFORM_DELAY
2762	if (!isp1362_hcd->board->delay) {
2763		dev_err(hcd->self.controller, "No platform delay function given\n");
2764		retval = -ENODEV;
2765		goto err6;
2766	}
2767#endif
2768
2769	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2770		irq_flags |= IRQF_TRIGGER_RISING;
2771	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2772		irq_flags |= IRQF_TRIGGER_FALLING;
2773	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2774		irq_flags |= IRQF_TRIGGER_HIGH;
2775	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2776		irq_flags |= IRQF_TRIGGER_LOW;
2777
2778	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
2779	if (retval != 0)
2780		goto err6;
2781	pr_info("%s, irq %d\n", hcd->product_desc, irq);
2782
2783	create_debug_file(isp1362_hcd);
2784
2785	return 0;
2786
2787 err6:
2788	DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
2789	usb_put_hcd(hcd);
2790 err5:
2791	DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
2792	iounmap(data_reg);
2793 err4:
2794	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
2795	release_mem_region(data->start, resource_size(data));
2796 err3:
2797	DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
2798	iounmap(addr_reg);
2799 err2:
2800	DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
2801	release_mem_region(addr->start, resource_size(addr));
2802 err1:
2803	pr_err("%s: init error, %d\n", __func__, retval);
2804
2805	return retval;
2806}
2807
2808#ifdef	CONFIG_PM
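/*
 * Legacy platform PM hooks: on freeze only the root hub is suspended via
 * isp1362_bus_suspend(); for other suspend events global port power is
 * switched off (RH_HS_LPS). Resume either restores port power (RH_HS_LPSC)
 * or resumes the root hub, depending on how the device was suspended.
 */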
2809static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2810{
2811	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2812	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2813	unsigned long flags;
2814	int retval = 0;
2815
2816	DBG(0, "%s: Suspending device\n", __func__);
2817
2818	if (state.event == PM_EVENT_FREEZE) {
2819		DBG(0, "%s: Suspending root hub\n", __func__);
2820		retval = isp1362_bus_suspend(hcd);
2821	} else {
2822		DBG(0, "%s: Suspending RH ports\n", __func__);
2823		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2824		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2825		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2826	}
2827	if (retval == 0)
2828		pdev->dev.power.power_state = state;
2829	return retval;
2830}
2831
2832static int isp1362_resume(struct platform_device *pdev)
2833{
2834	struct usb_hcd *hcd = platform_get_drvdata(pdev);
2835	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2836	unsigned long flags;
2837
2838	DBG(0, "%s: Resuming\n", __func__);
2839
2840	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2841		DBG(0, "%s: Resume RH ports\n", __func__);
2842		spin_lock_irqsave(&isp1362_hcd->lock, flags);
2843		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2844		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2845		return 0;
2846	}
2847
2848	pdev->dev.power.power_state = PMSG_ON;
2849
2850	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2851}
2852#else
2853#define	isp1362_suspend	NULL
2854#define	isp1362_resume	NULL
2855#endif
2856
2857static struct platform_driver isp1362_driver = {
2858	.probe = isp1362_probe,
2859	.remove = __devexit_p(isp1362_remove),
2860
2861	.suspend = isp1362_suspend,
2862	.resume = isp1362_resume,
2863	.driver = {
2864		.name = (char *)hcd_name,
2865		.owner = THIS_MODULE,
2866	},
2867};
2868
2869module_platform_driver(isp1362_driver);