/*
 * Note: scraped from a kernel source mirror; this driver was removed
 * upstream and the file does not exist in Linux v6.2.
 */
   1/*
   2 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
   3 * Author: Chao Xie <chao.xie@marvell.com>
   4 *	   Neil Zhang <zhangwm@marvell.com>
   5 *
   6 * This program is free software; you can redistribute  it and/or modify it
   7 * under  the terms of  the GNU General  Public License as published by the
   8 * Free Software Foundation;  either version 2 of the  License, or (at your
   9 * option) any later version.
  10 */
  11
  12#include <linux/module.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/dmapool.h>
  16#include <linux/kernel.h>
  17#include <linux/delay.h>
  18#include <linux/ioport.h>
  19#include <linux/sched.h>
  20#include <linux/slab.h>
  21#include <linux/errno.h>
  22#include <linux/err.h>
  23#include <linux/timer.h>
  24#include <linux/list.h>
  25#include <linux/interrupt.h>
  26#include <linux/moduleparam.h>
  27#include <linux/device.h>
  28#include <linux/usb/ch9.h>
  29#include <linux/usb/gadget.h>
  30#include <linux/usb/otg.h>
  31#include <linux/pm.h>
  32#include <linux/io.h>
  33#include <linux/irq.h>
  34#include <linux/platform_device.h>
  35#include <linux/clk.h>
  36#include <linux/platform_data/mv_usb.h>
  37#include <asm/unaligned.h>
  38
  39#include "mv_udc.h"
  40
#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

/*
 * Direction of a transfer on an endpoint.  ep0 follows the direction of
 * the control transfer currently in progress (udc->ep0_dir); all other
 * endpoints have a fixed direction.
 */
#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
				((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000

/* busy-wait loops poll every LOOPS_USEC microseconds */
#define LOOPS_USEC_SHIFT	1
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)

/* completed when the released gadget device has been freed */
static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* forward declarations */
static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
  65
/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,	/* ep0; direction is tracked separately */
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};
  74
  75static void ep0_reset(struct mv_udc *udc)
  76{
  77	struct mv_ep *ep;
  78	u32 epctrlx;
  79	int i = 0;
  80
  81	/* ep0 in and out */
  82	for (i = 0; i < 2; i++) {
  83		ep = &udc->eps[i];
  84		ep->udc = udc;
  85
  86		/* ep0 dQH */
  87		ep->dqh = &udc->ep_dqh[i];
  88
  89		/* configure ep0 endpoint capabilities in dQH */
  90		ep->dqh->max_packet_length =
  91			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
  92			| EP_QUEUE_HEAD_IOS;
  93
  94		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
  95
  96		epctrlx = readl(&udc->op_regs->epctrlx[0]);
  97		if (i) {	/* TX */
  98			epctrlx |= EPCTRL_TX_ENABLE
  99				| (USB_ENDPOINT_XFER_CONTROL
 100					<< EPCTRL_TX_EP_TYPE_SHIFT);
 101
 102		} else {	/* RX */
 103			epctrlx |= EPCTRL_RX_ENABLE
 104				| (USB_ENDPOINT_XFER_CONTROL
 105					<< EPCTRL_RX_EP_TYPE_SHIFT);
 106		}
 107
 108		writel(epctrlx, &udc->op_regs->epctrlx[0]);
 109	}
 110}
 111
 112/* protocol ep0 stall, will automatically be cleared on new transaction */
 113static void ep0_stall(struct mv_udc *udc)
 114{
 115	u32	epctrlx;
 116
 117	/* set TX and RX to stall */
 118	epctrlx = readl(&udc->op_regs->epctrlx[0]);
 119	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
 120	writel(epctrlx, &udc->op_regs->epctrlx[0]);
 121
 122	/* update ep0 state */
 123	udc->ep0_state = WAIT_FOR_SETUP;
 124	udc->ep0_dir = EP_DIR_OUT;
 125}
 126
/*
 * Walk the dTD chain of @curr_req and collect its completion status.
 *
 * Returns 1 if the request is still in progress (a dTD is still owned by
 * the controller), 0 on successful completion (curr_req->req.actual is
 * updated), or a negative errno decoded from the dTD error bits.
 */
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int td_complete, actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;	/* dQH entries alternate OUT (even) / IN (odd) */

	curr_dtd = curr_req->head;
	td_complete = 0;
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		/* controller still owns this dTD: request not finished yet */
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			/* bytes the controller did NOT transfer for this dTD */
			remaining_length =
				(curr_dtd->size_ioc_sts	& DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					/* leftover data on an IN transfer is a protocol error */
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					/* a short OUT packet simply ends the transfer */
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	/* epstatus bit for this endpoint: OUT in bits 0-15, IN in bits 16-31 */
	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	/*
	 * If the controller's current-dTD pointer still references our last
	 * dTD, busy-wait until it moves on (or, at the end of the chain,
	 * until the endpoint status bit clears) before the dTDs are freed.
	 */
	while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}
 207
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 * request is still in progress.
 *
 * Unlinks the request from the endpoint queue, returns its dTD chain to
 * the DMA pool, unmaps the request buffer, and invokes the gadget-layer
 * completion callback with udc->lock temporarily dropped.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Removed the req from fsl_ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		/* grab the next pointer before the node is freed */
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	/* -ESHUTDOWN is the normal "going away" status; don't log it */
	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	/* drop the lock across the callback so it may re-queue requests */
	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from gadget layer,
	 * eg fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
 261
/*
 * Append the dTD chain of @req to the endpoint's hardware queue and prime
 * the endpoint if necessary.  Caller holds udc->lock.
 *
 * If requests are already pending, the new chain is linked after the
 * current tail and the ATDTW tripwire protocol decides safely whether
 * the endpoint must be re-primed.
 *
 * Returns 0 on success, -ETIME if the tripwire never latched.
 */
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	/* dQH entries are laid out as OUT/IN pairs per endpoint number */
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	/* OUT endpoints use register bits 0-15, IN endpoints bits 16-31 */
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		/* graft the new chain onto the tail of the pending one */
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		/* make the link visible before consulting the hardware */
		wmb();

		/* already primed: the controller will pick up the new dTDs */
		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared. When hardware see a hazard, it will clear
			 * the bit or else we remain set to 1 and we can
			 * proceed with priming of endpoint if not already
			 * primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		/* endpoint still active: no need to re-prime */
		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occure before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
 344
/*
 * Allocate and fill one dTD for @req, covering up to EP_MAX_LENGTH_TRANSFER
 * bytes (or mult * maxpacket for isochronous endpoints) starting at the
 * request's current offset.
 *
 * On return *length is the byte count this dTD covers, *dma its bus
 * address, and *is_last whether the request is complete after this dTD
 * (req->req.zero taken into account).  Returns NULL on pool exhaustion.
 */
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		dqh = req->ep->dqh;
		/* high-bandwidth isoc: up to 'mult' packets per microframe */
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we can not use dma_to_virt
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	/* remaining pointers are the following 4K pages of the buffer */
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	/* multiplier field for high-bandwidth isoc transfers */
	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	/* make the dTD contents visible before it is linked in */
	mb();

	return dtd;
}
 412
 413/* generate dTD linked list for a request */
 414static int req_to_dtd(struct mv_req *req)
 415{
 416	unsigned count;
 417	int is_last, is_first = 1;
 418	struct mv_dtd *dtd, *last_dtd = NULL;
 419	struct mv_udc *udc;
 420	dma_addr_t dma;
 421
 422	udc = req->ep->udc;
 423
 424	do {
 425		dtd = build_dtd(req, &count, &dma, &is_last);
 426		if (dtd == NULL)
 427			return -ENOMEM;
 428
 429		if (is_first) {
 430			is_first = 0;
 431			req->head = dtd;
 432		} else {
 433			last_dtd->dtd_next = dma;
 434			last_dtd->next_dtd_virt = dtd;
 435		}
 436		last_dtd = dtd;
 437		req->dtd_count++;
 438	} while (!is_last);
 439
 440	/* set terminate bit to 1 for the last dTD */
 441	dtd->dtd_next = DTD_NEXT_TERMINATE;
 442
 443	req->tail = dtd;
 444
 445	return 0;
 446}
 447
 448static int mv_ep_enable(struct usb_ep *_ep,
 449		const struct usb_endpoint_descriptor *desc)
 450{
 451	struct mv_udc *udc;
 452	struct mv_ep *ep;
 453	struct mv_dqh *dqh;
 454	u16 max = 0;
 455	u32 bit_pos, epctrlx, direction;
 456	unsigned char zlt = 0, ios = 0, mult = 0;
 457	unsigned long flags;
 458
 459	ep = container_of(_ep, struct mv_ep, ep);
 460	udc = ep->udc;
 461
 462	if (!_ep || !desc
 463			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 464		return -EINVAL;
 465
 466	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
 467		return -ESHUTDOWN;
 468
 469	direction = ep_dir(ep);
 470	max = usb_endpoint_maxp(desc);
 471
 472	/*
 473	 * disable HW zero length termination select
 474	 * driver handles zero length packet through req->req.zero
 475	 */
 476	zlt = 1;
 477
 478	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
 479
 480	/* Check if the Endpoint is Primed */
 481	if ((readl(&udc->op_regs->epprime) & bit_pos)
 482		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
 483		dev_info(&udc->dev->dev,
 484			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
 485			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
 486			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
 487			(unsigned)readl(&udc->op_regs->epprime),
 488			(unsigned)readl(&udc->op_regs->epstatus),
 489			(unsigned)bit_pos);
 490		goto en_done;
 491	}
 492	/* Set the max packet length, interrupt on Setup and Mult fields */
 493	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
 494	case USB_ENDPOINT_XFER_BULK:
 495		zlt = 1;
 496		mult = 0;
 497		break;
 498	case USB_ENDPOINT_XFER_CONTROL:
 499		ios = 1;
 500	case USB_ENDPOINT_XFER_INT:
 501		mult = 0;
 502		break;
 503	case USB_ENDPOINT_XFER_ISOC:
 504		/* Calculate transactions needed for high bandwidth iso */
 505		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
 506		max = max & 0x7ff;	/* bit 0~10 */
 507		/* 3 transactions at most */
 508		if (mult > 3)
 509			goto en_done;
 510		break;
 511	default:
 512		goto en_done;
 513	}
 514
 515	spin_lock_irqsave(&udc->lock, flags);
 516	/* Get the endpoint queue head address */
 517	dqh = ep->dqh;
 518	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
 519		| (mult << EP_QUEUE_HEAD_MULT_POS)
 520		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
 521		| (ios ? EP_QUEUE_HEAD_IOS : 0);
 522	dqh->next_dtd_ptr = 1;
 523	dqh->size_ioc_int_sts = 0;
 524
 525	ep->ep.maxpacket = max;
 526	ep->ep.desc = desc;
 527	ep->stopped = 0;
 528
 529	/* Enable the endpoint for Rx or Tx and set the endpoint type */
 530	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 531	if (direction == EP_DIR_IN) {
 532		epctrlx &= ~EPCTRL_TX_ALL_MASK;
 533		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
 534			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
 535				<< EPCTRL_TX_EP_TYPE_SHIFT);
 536	} else {
 537		epctrlx &= ~EPCTRL_RX_ALL_MASK;
 538		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
 539			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
 540				<< EPCTRL_RX_EP_TYPE_SHIFT);
 541	}
 542	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 543
 544	/*
 545	 * Implement Guideline (GL# USB-7) The unused endpoint type must
 546	 * be programmed to bulk.
 547	 */
 548	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 549	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
 550		epctrlx |= (USB_ENDPOINT_XFER_BULK
 551				<< EPCTRL_RX_EP_TYPE_SHIFT);
 552		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 553	}
 554
 555	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 556	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
 557		epctrlx |= (USB_ENDPOINT_XFER_BULK
 558				<< EPCTRL_TX_EP_TYPE_SHIFT);
 559		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 560	}
 561
 562	spin_unlock_irqrestore(&udc->lock, flags);
 563
 564	return 0;
 565en_done:
 566	return -EINVAL;
 567}
 568
 569static int  mv_ep_disable(struct usb_ep *_ep)
 570{
 571	struct mv_udc *udc;
 572	struct mv_ep *ep;
 573	struct mv_dqh *dqh;
 574	u32 bit_pos, epctrlx, direction;
 575	unsigned long flags;
 576
 577	ep = container_of(_ep, struct mv_ep, ep);
 578	if ((_ep == NULL) || !ep->ep.desc)
 579		return -EINVAL;
 580
 581	udc = ep->udc;
 582
 583	/* Get the endpoint queue head address */
 584	dqh = ep->dqh;
 585
 586	spin_lock_irqsave(&udc->lock, flags);
 587
 588	direction = ep_dir(ep);
 589	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
 590
 591	/* Reset the max packet length and the interrupt on Setup */
 592	dqh->max_packet_length = 0;
 593
 594	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
 595	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
 596	epctrlx &= ~((direction == EP_DIR_IN)
 597			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
 598			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
 599	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
 600
 601	/* nuke all pending requests (does flush) */
 602	nuke(ep, -ESHUTDOWN);
 603
 604	ep->ep.desc = NULL;
 605	ep->stopped = 1;
 606
 607	spin_unlock_irqrestore(&udc->lock, flags);
 608
 609	return 0;
 610}
 611
 612static struct usb_request *
 613mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 614{
 615	struct mv_req *req = NULL;
 616
 617	req = kzalloc(sizeof *req, gfp_flags);
 618	if (!req)
 619		return NULL;
 620
 621	req->req.dma = DMA_ADDR_INVALID;
 622	INIT_LIST_HEAD(&req->queue);
 623
 624	return &req->req;
 625}
 626
 627static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
 628{
 629	struct mv_req *req = NULL;
 630
 631	req = container_of(_req, struct mv_req, req);
 632
 633	if (_req)
 634		kfree(req);
 635}
 636
/*
 * usb_ep_ops.fifo_flush: flush any primed transfer on the endpoint.
 *
 * Writes the endpoint's flush bit(s) and waits - bounded by FLUSH_TIMEOUT
 * and EPSTATUS_TIMEOUT - until the hardware reports the endpoint idle,
 * re-flushing if the endpoint re-primes in the meantime.
 */
static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	/* ep0 flushes both directions at once */
	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					"bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
		/* loop again if the endpoint became active once more */
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
 696
 697/* queues (submits) an I/O request to an endpoint */
 698static int
 699mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 700{
 701	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
 702	struct mv_req *req = container_of(_req, struct mv_req, req);
 703	struct mv_udc *udc = ep->udc;
 704	unsigned long flags;
 705	int retval;
 706
 707	/* catch various bogus parameters */
 708	if (!_req || !req->req.complete || !req->req.buf
 709			|| !list_empty(&req->queue)) {
 710		dev_err(&udc->dev->dev, "%s, bad params", __func__);
 711		return -EINVAL;
 712	}
 713	if (unlikely(!_ep || !ep->ep.desc)) {
 714		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
 715		return -EINVAL;
 716	}
 717
 718	udc = ep->udc;
 719	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
 720		return -ESHUTDOWN;
 721
 722	req->ep = ep;
 723
 724	/* map virtual address to hardware */
 725	retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
 726	if (retval)
 727		return retval;
 728
 729	req->req.status = -EINPROGRESS;
 730	req->req.actual = 0;
 731	req->dtd_count = 0;
 732
 733	spin_lock_irqsave(&udc->lock, flags);
 734
 735	/* build dtds and push them to device queue */
 736	if (!req_to_dtd(req)) {
 737		retval = queue_dtd(ep, req);
 738		if (retval) {
 739			spin_unlock_irqrestore(&udc->lock, flags);
 740			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
 741			goto err_unmap_dma;
 742		}
 743	} else {
 744		spin_unlock_irqrestore(&udc->lock, flags);
 745		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
 746		retval = -ENOMEM;
 747		goto err_unmap_dma;
 748	}
 749
 750	/* Update ep0 state */
 751	if (ep->ep_num == 0)
 752		udc->ep0_state = DATA_STATE_XMIT;
 753
 754	/* irq handler advances the queue */
 755	list_add_tail(&req->queue, &ep->queue);
 756	spin_unlock_irqrestore(&udc->lock, flags);
 757
 758	return 0;
 759
 760err_unmap_dma:
 761	usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
 762
 763	return retval;
 764}
 765
 766static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
 767{
 768	struct mv_dqh *dqh = ep->dqh;
 769	u32 bit_pos;
 770
 771	/* Write dQH next pointer and terminate bit to 0 */
 772	dqh->next_dtd_ptr = req->head->td_dma
 773		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
 774
 775	/* clear active and halt bit, in case set from a previous error */
 776	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
 777
 778	/* Ensure that updates to the QH will occure before priming. */
 779	wmb();
 780
 781	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
 782
 783	/* Prime the Endpoint */
 784	writel(bit_pos, &ep->udc->op_regs->epprime);
 785}
 786
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		/* head of the queue: the hardware may be working on it */
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			/* queue becomes empty: reset the queue head */
			qh = ep->dqh;
			qh->next_dtd_ptr = 1;	/* terminate bit set */
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		/* splice this request's dTDs out of the hardware chain */
		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);

	}

	/* retire the request (drops and re-takes the lock for the callback) */
	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}
 869
 870static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
 871{
 872	u32 epctrlx;
 873
 874	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
 875
 876	if (stall) {
 877		if (direction == EP_DIR_IN)
 878			epctrlx |= EPCTRL_TX_EP_STALL;
 879		else
 880			epctrlx |= EPCTRL_RX_EP_STALL;
 881	} else {
 882		if (direction == EP_DIR_IN) {
 883			epctrlx &= ~EPCTRL_TX_EP_STALL;
 884			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
 885		} else {
 886			epctrlx &= ~EPCTRL_RX_EP_STALL;
 887			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
 888		}
 889	}
 890	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
 891}
 892
 893static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
 894{
 895	u32 epctrlx;
 896
 897	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
 898
 899	if (direction == EP_DIR_OUT)
 900		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
 901	else
 902		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
 903}
 904
 905static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
 906{
 907	struct mv_ep *ep;
 908	unsigned long flags = 0;
 909	int status = 0;
 910	struct mv_udc *udc;
 911
 912	ep = container_of(_ep, struct mv_ep, ep);
 913	udc = ep->udc;
 914	if (!_ep || !ep->ep.desc) {
 915		status = -EINVAL;
 916		goto out;
 917	}
 918
 919	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
 920		status = -EOPNOTSUPP;
 921		goto out;
 922	}
 923
 924	/*
 925	 * Attempt to halt IN ep will fail if any transfer requests
 926	 * are still queue
 927	 */
 928	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
 929		status = -EAGAIN;
 930		goto out;
 931	}
 932
 933	spin_lock_irqsave(&ep->udc->lock, flags);
 934	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
 935	if (halt && wedge)
 936		ep->wedge = 1;
 937	else if (!halt)
 938		ep->wedge = 0;
 939	spin_unlock_irqrestore(&ep->udc->lock, flags);
 940
 941	if (ep->ep_num == 0) {
 942		udc->ep0_state = WAIT_FOR_SETUP;
 943		udc->ep0_dir = EP_DIR_OUT;
 944	}
 945out:
 946	return status;
 947}
 948
/* usb_ep_ops.set_halt: (un)halt the endpoint without wedging it */
static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}
 953
/* usb_ep_ops.set_wedge: halt the endpoint and latch the wedge flag */
static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}
 958
 959static struct usb_ep_ops mv_ep_ops = {
 960	.enable		= mv_ep_enable,
 961	.disable	= mv_ep_disable,
 962
 963	.alloc_request	= mv_alloc_request,
 964	.free_request	= mv_free_request,
 965
 966	.queue		= mv_ep_queue,
 967	.dequeue	= mv_ep_dequeue,
 968
 969	.set_wedge	= mv_ep_set_wedge,
 970	.set_halt	= mv_ep_set_halt,
 971	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
 972};
 973
/* Prepare and enable the controller's functional clock */
static void udc_clock_enable(struct mv_udc *udc)
{
	clk_prepare_enable(udc->clk);
}
 978
/* Disable and unprepare the controller's functional clock */
static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}
 983
 984static void udc_stop(struct mv_udc *udc)
 985{
 986	u32 tmp;
 987
 988	/* Disable interrupts */
 989	tmp = readl(&udc->op_regs->usbintr);
 990	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
 991		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
 992	writel(tmp, &udc->op_regs->usbintr);
 993
 994	udc->stopped = 1;
 995
 996	/* Reset the Run the bit in the command register to stop VUSB */
 997	tmp = readl(&udc->op_regs->usbcmd);
 998	tmp &= ~USBCMD_RUN_STOP;
 999	writel(tmp, &udc->op_regs->usbcmd);
1000}
1001
1002static void udc_start(struct mv_udc *udc)
1003{
1004	u32 usbintr;
1005
1006	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
1007		| USBINTR_PORT_CHANGE_DETECT_EN
1008		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
1009	/* Enable interrupts */
1010	writel(usbintr, &udc->op_regs->usbintr);
1011
1012	udc->stopped = 0;
1013
1014	/* Set the Run bit in the command register */
1015	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
1016}
1017
/*
 * Reset the controller and reprogram it for device mode: stop it, issue a
 * controller reset, select device mode with setup-lockout off, install the
 * endpoint list address, and configure PORTSC.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset bit never clears.
 */
static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	/*
	 * NOTE(review): (~W1C | ~POWER) equals ~(W1C & POWER), which is
	 * likely all-ones, making this '&=' a no-op.  The intent was
	 * probably ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER) - confirm
	 * against the PORTSCx register definition before changing, since
	 * the fix would alter hardware behavior.
	 */
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	/* clear any stale ep0 stall */
	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}
1075
1076static int mv_udc_enable_internal(struct mv_udc *udc)
1077{
1078	int retval;
1079
1080	if (udc->active)
1081		return 0;
1082
1083	dev_dbg(&udc->dev->dev, "enable udc\n");
1084	udc_clock_enable(udc);
1085	if (udc->pdata->phy_init) {
1086		retval = udc->pdata->phy_init(udc->phy_regs);
1087		if (retval) {
1088			dev_err(&udc->dev->dev,
1089				"init phy error %d\n", retval);
1090			udc_clock_disable(udc);
1091			return retval;
1092		}
1093	}
1094	udc->active = 1;
1095
1096	return 0;
1097}
1098
/*
 * Power the controller up, but only when clock gating is in use;
 * otherwise the controller is kept enabled permanently.
 */
static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}
1106
1107static void mv_udc_disable_internal(struct mv_udc *udc)
1108{
1109	if (udc->active) {
1110		dev_dbg(&udc->dev->dev, "disable udc\n");
1111		if (udc->pdata->phy_deinit)
1112			udc->pdata->phy_deinit(udc->phy_regs);
1113		udc_clock_disable(udc);
1114		udc->active = 0;
1115	}
1116}
1117
/* Power the controller down, but only when clock gating is in use. */
static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}
1123
1124static int mv_udc_get_frame(struct usb_gadget *gadget)
1125{
1126	struct mv_udc *udc;
1127	u16	retval;
1128
1129	if (!gadget)
1130		return -ENODEV;
1131
1132	udc = container_of(gadget, struct mv_udc, gadget);
1133
1134	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
1135
1136	return retval;
1137}
1138
1139/* Tries to wake up the host connected to this gadget */
1140static int mv_udc_wakeup(struct usb_gadget *gadget)
1141{
1142	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
1143	u32 portsc;
1144
1145	/* Remote wakeup feature not enabled by host */
1146	if (!udc->remote_wakeup)
1147		return -ENOTSUPP;
1148
1149	portsc = readl(&udc->op_regs->portsc);
1150	/* not suspended? */
1151	if (!(portsc & PORTSCX_PORT_SUSPEND))
1152		return 0;
1153	/* trigger force resume */
1154	portsc |= PORTSCX_PORT_FORCE_RESUME;
1155	writel(portsc, &udc->op_regs->portsc[0]);
1156	return 0;
1157}
1158
/*
 * usb_gadget_ops.vbus_session: called when the VBUS state changes.
 * With a driver bound, softconnect on and VBUS present, the controller
 * is (re-)enabled and restarted; when VBUS drops, queued transfers are
 * flushed and the controller is powered down.
 */
static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		/* nothing to tear down if the controller is already off */
		if (!udc->active)
			goto out;

		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1195
/*
 * usb_gadget_ops.pullup: software-controlled connect/disconnect.
 * Enabling requires a bound driver, softconnect and active VBUS;
 * disabling flushes queued transfers and powers the controller down.
 */
static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
			__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfer in queue*/
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}
1228
1229static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
1230static int mv_udc_stop(struct usb_gadget *, struct usb_gadget_driver *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,

	/* bind/unbind a gadget driver to this controller */
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};
1248
1249static int eps_init(struct mv_udc *udc)
1250{
1251	struct mv_ep	*ep;
1252	char name[14];
1253	int i;
1254
1255	/* initialize ep0 */
1256	ep = &udc->eps[0];
1257	ep->udc = udc;
1258	strncpy(ep->name, "ep0", sizeof(ep->name));
1259	ep->ep.name = ep->name;
1260	ep->ep.ops = &mv_ep_ops;
1261	ep->wedge = 0;
1262	ep->stopped = 0;
1263	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
1264	ep->ep_num = 0;
1265	ep->ep.desc = &mv_ep0_desc;
1266	INIT_LIST_HEAD(&ep->queue);
1267
1268	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
1269
1270	/* initialize other endpoints */
1271	for (i = 2; i < udc->max_eps * 2; i++) {
1272		ep = &udc->eps[i];
1273		if (i % 2) {
1274			snprintf(name, sizeof(name), "ep%din", i / 2);
1275			ep->direction = EP_DIR_IN;
1276		} else {
1277			snprintf(name, sizeof(name), "ep%dout", i / 2);
1278			ep->direction = EP_DIR_OUT;
1279		}
1280		ep->udc = udc;
1281		strncpy(ep->name, name, sizeof(ep->name));
1282		ep->ep.name = ep->name;
1283
1284		ep->ep.ops = &mv_ep_ops;
1285		ep->stopped = 0;
1286		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
1287		ep->ep_num = i / 2;
1288
1289		INIT_LIST_HEAD(&ep->queue);
1290		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1291
1292		ep->dqh = &udc->ep_dqh[i];
1293	}
1294
1295	return 0;
1296}
1297
1298/* delete all endpoint requests, called with spinlock held */
1299static void nuke(struct mv_ep *ep, int status)
1300{
1301	/* called with spinlock held */
1302	ep->stopped = 1;
1303
1304	/* endpoint fifo flush */
1305	mv_ep_fifo_flush(&ep->ep);
1306
1307	while (!list_empty(&ep->queue)) {
1308		struct mv_req *req = NULL;
1309		req = list_entry(ep->queue.next, struct mv_req, queue);
1310		done(ep, req, status);
1311	}
1312}
1313
/* stop all USB activities; called with udc->lock held */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	/* flush ep0 first, then every endpoint on the gadget's list */
	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		/* drop the lock across the gadget driver callback */
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
1332
/*
 * usb_gadget_ops.udc_start: bind a gadget driver to this controller.
 * Registers as a peripheral with the OTG transceiver when present,
 * then turns on the pullup and kicks the vbus worker in case a cable
 * was already attached at boot.
 */
static int mv_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	int retval = 0;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	/* only one gadget driver can be bound at a time */
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			/* unwind the binding done above */
			udc->driver = NULL;
			return retval;
		}
	}

	/* pullup is always on */
	mv_udc_pullup(&udc->gadget, 1);

	/* When boot with cable attached, there will be no vbus irq occurred */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}
1377
/*
 * usb_gadget_ops.udc_stop: unbind the gadget driver.  The controller
 * is temporarily enabled so the stop/flush register writes take
 * effect, then powered back down.
 */
static int mv_udc_stop(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	/* make sure the controller is powered before touching registers */
	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	udc->driver = NULL;

	return 0;
}
1403
1404static void mv_set_ptc(struct mv_udc *udc, u32 mode)
1405{
1406	u32 portsc;
1407
1408	portsc = readl(&udc->op_regs->portsc[0]);
1409	portsc |= mode << 16;
1410	writel(portsc, &udc->op_regs->portsc[0]);
1411}
1412
1413static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
1414{
1415	struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
1416	struct mv_req *req = container_of(_req, struct mv_req, req);
1417	struct mv_udc *udc;
1418	unsigned long flags;
1419
1420	udc = mvep->udc;
1421
1422	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
1423
1424	spin_lock_irqsave(&udc->lock, flags);
1425	if (req->test_mode) {
1426		mv_set_ptc(udc, req->test_mode);
1427		req->test_mode = 0;
1428	}
1429	spin_unlock_irqrestore(&udc->lock, flags);
1430}
1431
/*
 * Queue the ep0 status (or 2-byte GET_STATUS reply) transfer.
 * @direction selects the ep0 data direction, @status is the 16-bit
 * payload, and @empty requests a zero-length packet instead.
 * Reuses the preallocated udc->status_req.  Returns 0 or a negative
 * error, in which case the caller typically stalls ep0.
 */
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		/* enter the test mode after this status stage completes */
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else{	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	/*
	 * NOTE(review): the buffer was mapped above with dma_map_single()
	 * but is unmapped here via usb_gadget_unmap_request() - confirm
	 * the two stay correctly paired for this request.
	 */
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}
1493
1494static void mv_udc_testmode(struct mv_udc *udc, u16 index)
1495{
1496	if (index <= TEST_FORCE_EN) {
1497		udc->test_mode = index;
1498		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1499			ep0_stall(udc);
1500	} else
1501		dev_err(&udc->dev->dev,
1502			"This test mode(%d) is not supported\n", index);
1503}
1504
1505static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
1506{
1507	udc->dev_addr = (u8)setup->wValue;
1508
1509	/* update usb state */
1510	udc->usb_state = USB_STATE_ADDRESS;
1511
1512	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
1513		ep0_stall(udc);
1514}
1515
1516static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
1517	struct usb_ctrlrequest *setup)
1518{
1519	u16 status = 0;
1520	int retval;
1521
1522	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
1523		!= (USB_DIR_IN | USB_TYPE_STANDARD))
1524		return;
1525
1526	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1527		status = 1 << USB_DEVICE_SELF_POWERED;
1528		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
1529	} else if ((setup->bRequestType & USB_RECIP_MASK)
1530			== USB_RECIP_INTERFACE) {
1531		/* get interface status */
1532		status = 0;
1533	} else if ((setup->bRequestType & USB_RECIP_MASK)
1534			== USB_RECIP_ENDPOINT) {
1535		u8 ep_num, direction;
1536
1537		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
1538		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
1539				? EP_DIR_IN : EP_DIR_OUT;
1540		status = ep_is_stall(udc, ep_num, direction)
1541				<< USB_ENDPOINT_HALT;
1542	}
1543
1544	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
1545	if (retval)
1546		ep0_stall(udc);
1547	else
1548		udc->ep0_state = DATA_STATE_XMIT;
1549}
1550
/* handle the standard CLEAR_FEATURE request (USB 2.0 ch9) */
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			/* a wedged endpoint stays halted, but is still acked */
			if (ep->wedge == 1)
				break;
			/* drop the spinlock across ep_set_stall() */
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	/* acknowledge with a zero-length IN status phase */
	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1594
/* handle the standard SET_FEATURE request (USB 2.0 ch9) */
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			/*
			 * NOTE(review): neither ep0_stall() call below is
			 * followed by a return, so mv_udc_testmode() still
			 * runs on an invalid request - confirm intended.
			 */
			if (setup->wIndex & 0xFF
				||  udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			/* test selector is in the high byte of wIndex */
			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			/* drop the spinlock across ep_set_stall() */
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	/* acknowledge with a zero-length IN status phase */
	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}
1646
/*
 * Dispatch a SETUP packet on ep0: a few standard requests are handled
 * in the driver, everything else is delegated to the gadget driver's
 * setup() callback (with udc->lock dropped across the call).
 */
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	/* a new SETUP cancels anything pending on the OUT side */
	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					?  EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					?  DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}
1710
1711/* complete DATA or STATUS phase of ep0 prime status phase if needed */
1712static void ep0_req_complete(struct mv_udc *udc,
1713	struct mv_ep *ep0, struct mv_req *req)
1714{
1715	u32 new_addr;
1716
1717	if (udc->usb_state == USB_STATE_ADDRESS) {
1718		/* set the new address */
1719		new_addr = (u32)udc->dev_addr;
1720		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
1721			&udc->op_regs->deviceaddr);
1722	}
1723
1724	done(ep0, req, 0);
1725
1726	switch (udc->ep0_state) {
1727	case DATA_STATE_XMIT:
1728		/* receive status phase */
1729		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
1730			ep0_stall(udc);
1731		break;
1732	case DATA_STATE_RECV:
1733		/* send status phase */
1734		if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
1735			ep0_stall(udc);
1736		break;
1737	case WAIT_FOR_OUT_STATUS:
1738		udc->ep0_state = WAIT_FOR_SETUP;
1739		break;
1740	case WAIT_FOR_SETUP:
1741		dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
1742		break;
1743	default:
1744		ep0_stall(udc);
1745		break;
1746	}
1747}
1748
/*
 * Copy the 8-byte SETUP packet for @ep_num out of its OUT dQH into
 * @buffer_ptr, using the controller's "setup tripwire" to detect a new
 * SETUP arriving mid-copy and retry until the copy is consistent.
 */
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* while a hazard exists when setup package arrives */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
		/* retry while the tripwire bit reads back cleared */
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
1773
/*
 * Handle transfer-complete interrupts: first drain any pending SETUP
 * packets, then walk the ENDPTCOMPLETE bits and retire finished
 * requests on each endpoint queue.
 */
static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	/* ack the completions we are about to process */
	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		/* OUT completions are bits 0-15, IN completions 16-31 */
		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		/* index 1 is ep0 IN, tracked on the shared eps[0] */
		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an uncomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}
1846
/*
 * Handle a USB bus reset interrupt: clear the device address and all
 * pending endpoint state, then either just flush (reset still in
 * progress) or fully re-initialize the controller.
 */
static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The address bits are past bit 25-31. Set the address */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));
			break;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		/* reset signalling still active: just quiesce */
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
	} else {
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc));

		/*
		 * re-initialize
		 * controller reset
		 */
		udc_reset(udc);

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */
		ep0_reset(udc);

		/* enable interrupt and set controller to run state */
		udc_start(udc);

		udc->usb_state = USB_STATE_ATTACHED;
	}
}
1912
1913static void handle_bus_resume(struct mv_udc *udc)
1914{
1915	udc->usb_state = udc->resume_state;
1916	udc->resume_state = 0;
1917
1918	/* report resume to the driver */
1919	if (udc->driver) {
1920		if (udc->driver->resume) {
1921			spin_unlock(&udc->lock);
1922			udc->driver->resume(&udc->gadget);
1923			spin_lock(&udc->lock);
1924		}
1925	}
1926}
1927
/*
 * Bus suspend interrupt: remember the current state so resume can
 * restore it, then notify the gadget driver with the lock dropped.
 */
static void irq_process_suspend(struct mv_udc *udc)
{
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);
	}
}
1939
/*
 * Port-change interrupt: latch the negotiated speed once reset
 * signalling ends, and track suspend/resume transitions reported in
 * PORTSC.
 */
static void irq_process_port_change(struct mv_udc *udc)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		/* Get the speed */
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		switch (speed) {
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
			break;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
			break;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			break;
		default:
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			break;
		}
	}

	if (portsc & PORTSCX_PORT_SUSPEND) {
		/* entering suspend: save state and notify the driver */
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		/* leaving suspend: restore state and notify the driver */
		handle_bus_resume(udc);
	}

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
}
1982
/* USB error interrupt (USBSTS_ERR): just count it for diagnostics */
static void irq_process_error(struct mv_udc *udc)
{
	/* Increment the error count */
	udc->errors++;
}
1988
/*
 * Main interrupt handler: acknowledge the active status bits and
 * dispatch each condition to its dedicated handler, all under
 * udc->lock.
 */
static irqreturn_t mv_udc_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;
	u32 status, intr;

	/* Disable ISR when stopped bit is set */
	if (udc->stopped)
		return IRQ_NONE;

	spin_lock(&udc->lock);

	/* only consider conditions whose interrupt is enabled */
	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
	status &= intr;

	if (status == 0) {
		spin_unlock(&udc->lock);
		return IRQ_NONE;
	}

	/* Clear all the interrupts occurred */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);

	return IRQ_HANDLED;
}
2031
/*
 * VBUS change interrupt: defer the actual handling to the workqueue,
 * since polling VBUS and (re-)initializing the PHY can take too long
 * for interrupt context.
 */
static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
{
	struct mv_udc *udc = (struct mv_udc *)dev;

	/* polling VBUS and init phy may cause too much time*/
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return IRQ_HANDLED;
}
2042
2043static void mv_udc_vbus_work(struct work_struct *work)
2044{
2045	struct mv_udc *udc;
2046	unsigned int vbus;
2047
2048	udc = container_of(work, struct mv_udc, vbus_work);
2049	if (!udc->pdata->vbus)
2050		return;
2051
2052	vbus = udc->pdata->vbus->poll();
2053	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2054
2055	if (vbus == VBUS_HIGH)
2056		mv_udc_vbus_session(&udc->gadget, 1);
2057	else if (vbus == VBUS_LOW)
2058		mv_udc_vbus_session(&udc->gadget, 0);
2059}
2060
2061/* release device structure */
2062static void gadget_release(struct device *_dev)
2063{
2064	struct mv_udc *udc;
2065
2066	udc = dev_get_drvdata(_dev);
2067
2068	complete(udc->done);
2069}
2070
2071static int mv_udc_remove(struct platform_device *pdev)
2072{
2073	struct mv_udc *udc;
2074
2075	udc = platform_get_drvdata(pdev);
2076
2077	usb_del_gadget_udc(&udc->gadget);
2078
2079	if (udc->qwork) {
2080		flush_workqueue(udc->qwork);
2081		destroy_workqueue(udc->qwork);
2082	}
2083
2084	/* free memory allocated in probe */
2085	if (udc->dtd_pool)
2086		dma_pool_destroy(udc->dtd_pool);
2087
2088	if (udc->ep_dqh)
2089		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2090			udc->ep_dqh, udc->ep_dqh_dma);
2091
2092	mv_udc_disable(udc);
2093
2094	/* free dev, wait for the release() finished */
2095	wait_for_completion(udc->done);
2096
2097	return 0;
2098}
2099
/*
 * Platform driver probe: map registers, power the controller up, stop
 * any boot-loader-initiated session, allocate the dQH/dTD DMA
 * structures and endpoint bookkeeping, set up interrupts and VBUS
 * handling, and finally register the gadget with the UDC core.
 */
static int mv_udc_probe(struct platform_device *pdev)
{
	struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct mv_udc *udc;
	int retval = 0;
	struct resource *r;
	size_t size;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "missing platform_data\n");
		return -ENODEV;
	}

	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
	if (udc == NULL) {
		dev_err(&pdev->dev, "failed to allocate memory for udc\n");
		return -ENOMEM;
	}

	/* signalled by gadget_release(); awaited in mv_udc_remove() */
	udc->done = &release_done;
	udc->pdata = dev_get_platdata(&pdev->dev);
	spin_lock_init(&udc->lock);

	udc->dev = pdev;

	if (pdata->mode == MV_USB_MODE_OTG) {
		udc->transceiver = devm_usb_get_phy(&pdev->dev,
					USB_PHY_TYPE_USB2);
		if (IS_ERR(udc->transceiver)) {
			retval = PTR_ERR(udc->transceiver);

			if (retval == -ENXIO)
				return retval;

			/* PHY not ready yet - retry probe later */
			udc->transceiver = NULL;
			return -EPROBE_DEFER;
		}
	}

	/* udc only have one sysclk. */
	udc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(udc->clk))
		return PTR_ERR(udc->clk);

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no I/O memory resource defined\n");
		return -ENODEV;
	}

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		return -EBUSY;
	}

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
	if (r == NULL) {
		dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
		return -ENODEV;
	}

	udc->phy_regs = ioremap(r->start, resource_size(r));
	if (udc->phy_regs == NULL) {
		dev_err(&pdev->dev, "failed to map phy I/O memory\n");
		return -EBUSY;
	}

	/* we will acces controller register, so enable the clk */
	retval = mv_udc_enable_internal(udc);
	if (retval)
		return retval;

	/* operational registers start CAPLENGTH bytes past the cap regs */
	udc->op_regs =
		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
			& CAPLENGTH_MASK));
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * some platform will use usb to download image, it may not disconnect
	 * usb gadget before loading kernel. So first stop udc here.
	 */
	udc_stop(udc);
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	/* one dQH per endpoint direction, aligned for the hardware */
	size = udc->max_eps * sizeof(struct mv_dqh) *2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
	udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
					&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&pdev->dev, "allocate dQH memory failed\n");
		retval = -ENOMEM;
		goto err_disable_clock;
	}
	udc->ep_dqh_size = size;

	/* create dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			&pdev->dev,
			sizeof(struct mv_dtd),
			DTD_ALIGNMENT,
			DMA_BOUNDARY);

	if (!udc->dtd_pool) {
		retval = -ENOMEM;
		goto err_free_dma;
	}

	size = udc->max_eps * sizeof(struct mv_ep) *2;
	udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (udc->eps == NULL) {
		dev_err(&pdev->dev, "allocate ep memory failed\n");
		retval = -ENOMEM;
		goto err_destroy_dma;
	}

	/* initialize ep0 status request structure */
	udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
					GFP_KERNEL);
	if (!udc->status_req) {
		dev_err(&pdev->dev, "allocate status_req memory failed\n");
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get valid address */
	/* NOTE(review): this kzalloc() return is not checked - confirm */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	udc->status_req->req.dma = DMA_ADDR_INVALID;

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		retval = -ENODEV;
		goto err_destroy_dma;
	}
	udc->irq = r->start;
	if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
			udc->irq);
		retval = -ENODEV;
		goto err_destroy_dma;
	}

	/* initialize gadget structure */
	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	udc->gadget.name = driver_name;		/* gadget name */

	eps_init(udc);

	/* VBUS detect: we can disable/enable clock on demand.*/
	if (udc->transceiver)
		udc->clock_gating = 1;
	else if (pdata->vbus) {
		udc->clock_gating = 1;
		retval = devm_request_threaded_irq(&pdev->dev,
				pdata->vbus->irq, NULL,
				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
		if (retval) {
			/* fall back to always-on clocks */
			dev_info(&pdev->dev,
				"Can not request irq for VBUS, "
				"disable clock gating\n");
			udc->clock_gating = 0;
		}

		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
		if (!udc->qwork) {
			dev_err(&pdev->dev, "cannot create workqueue\n");
			retval = -ENOMEM;
			goto err_destroy_dma;
		}

		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
	}

	/*
	 * When clock gating is supported, we can disable clk and phy.
	 * If not, it means that VBUS detection is not supported, we
	 * have to enable vbus active all the time to let controller work.
	 */
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
	else
		udc->vbus_active = 1;

	retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
			gadget_release);
	if (retval)
		goto err_create_workqueue;

	platform_set_drvdata(pdev, udc);
	dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
		udc->clock_gating ? "with" : "without");

	return 0;

err_create_workqueue:
	destroy_workqueue(udc->qwork);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);
err_free_dma:
	dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
			udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	mv_udc_disable_internal(udc);

	return retval;
}
2323
2324#ifdef CONFIG_PM
2325static int mv_udc_suspend(struct device *dev)
2326{
2327	struct mv_udc *udc;
2328
2329	udc = dev_get_drvdata(dev);
2330
2331	/* if OTG is enabled, the following will be done in OTG driver*/
2332	if (udc->transceiver)
2333		return 0;
2334
2335	if (udc->pdata->vbus && udc->pdata->vbus->poll)
2336		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2337			dev_info(&udc->dev->dev, "USB cable is connected!\n");
2338			return -EAGAIN;
2339		}
2340
2341	/*
2342	 * only cable is unplugged, udc can suspend.
2343	 * So do not care about clock_gating == 1.
2344	 */
2345	if (!udc->clock_gating) {
2346		udc_stop(udc);
2347
2348		spin_lock_irq(&udc->lock);
2349		/* stop all usb activities */
2350		stop_activity(udc, udc->driver);
2351		spin_unlock_irq(&udc->lock);
2352
2353		mv_udc_disable_internal(udc);
2354	}
2355
2356	return 0;
2357}
2358
2359static int mv_udc_resume(struct device *dev)
2360{
2361	struct mv_udc *udc;
2362	int retval;
2363
2364	udc = dev_get_drvdata(dev);
2365
2366	/* if OTG is enabled, the following will be done in OTG driver*/
2367	if (udc->transceiver)
2368		return 0;
2369
2370	if (!udc->clock_gating) {
2371		retval = mv_udc_enable_internal(udc);
2372		if (retval)
2373			return retval;
2374
2375		if (udc->driver && udc->softconnect) {
2376			udc_reset(udc);
2377			ep0_reset(udc);
2378			udc_start(udc);
2379		}
2380	}
2381
2382	return 0;
2383}
2384
/*
 * System sleep operations.  Only .suspend/.resume are provided; no
 * distinct freeze/thaw/poweroff handling is implemented.
 */
static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
#endif
2390
2391static void mv_udc_shutdown(struct platform_device *pdev)
2392{
2393	struct mv_udc *udc;
2394	u32 mode;
2395
2396	udc = platform_get_drvdata(pdev);
2397	/* reset controller mode to IDLE */
2398	mv_udc_enable(udc);
2399	mode = readl(&udc->op_regs->usbmode);
2400	mode &= ~3;
2401	writel(mode, &udc->op_regs->usbmode);
2402	mv_udc_disable(udc);
2403}
2404
2405static struct platform_driver udc_driver = {
2406	.probe		= mv_udc_probe,
2407	.remove		= mv_udc_remove,
2408	.shutdown	= mv_udc_shutdown,
2409	.driver		= {
2410		.owner	= THIS_MODULE,
2411		.name	= "mv-udc",
2412#ifdef CONFIG_PM
2413		.pm	= &mv_udc_pm_ops,
2414#endif
2415	},
2416};
2417
/* Expands to the module init/exit boilerplate for udc_driver. */
module_platform_driver(udc_driver);
MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");