Linux Audio

Check our new training course

In-person Linux kernel drivers training

Jun 16-20, 2025
Register
Loading...
Note: File does not exist in v6.13.7.
   1/**
   2 * linux/drivers/usb/gadget/s3c-hsotg.c
   3 *
   4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
   5 *		http://www.samsung.com
   6 *
   7 * Copyright 2008 Openmoko, Inc.
   8 * Copyright 2008 Simtec Electronics
   9 *      Ben Dooks <ben@simtec.co.uk>
  10 *      http://armlinux.simtec.co.uk/
  11 *
  12 * S3C USB2.0 High-speed / OtG driver
  13 *
  14 * This program is free software; you can redistribute it and/or modify
  15 * it under the terms of the GNU General Public License version 2 as
  16 * published by the Free Software Foundation.
  17 */
  18
  19#include <linux/kernel.h>
  20#include <linux/module.h>
  21#include <linux/spinlock.h>
  22#include <linux/interrupt.h>
  23#include <linux/platform_device.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/debugfs.h>
  26#include <linux/seq_file.h>
  27#include <linux/delay.h>
  28#include <linux/io.h>
  29#include <linux/slab.h>
  30#include <linux/clk.h>
  31#include <linux/regulator/consumer.h>
  32
  33#include <linux/usb/ch9.h>
  34#include <linux/usb/gadget.h>
  35#include <linux/platform_data/s3c-hsotg.h>
  36
  37#include <mach/map.h>
  38
  39#include "s3c-hsotg.h"
  40
  41#define DMA_ADDR_INVALID (~((dma_addr_t)0))
  42
/* regulator names, order matches the hsotg->supplies bulk-data array */
static const char * const s3c_hsotg_supply_names[] = {
	"vusb_d",		/* digital USB supply, 1.2V */
	"vusb_a",		/* analog USB supply, 1.1V */
};
  47
  48/*
  49 * EP0_MPS_LIMIT
  50 *
  51 * Unfortunately there seems to be a limit of the amount of data that can
  52 * be transferred by IN transactions on EP0. This is either 127 bytes or 3
  53 * packets (which practically means 1 packet and 63 bytes of data) when the
  54 * MPS is set to 64.
  55 *
  56 * This means if we are wanting to move >127 bytes of data, we need to
  57 * split the transactions up, but just doing one packet at a time does
  58 * not work (this may be an implicit DATA0 PID on first packet of the
  59 * transaction) and doing 2 packets is outside the controller's limits.
  60 *
  61 * If we try to lower the MPS size for EP0, then no transfers work properly
  62 * for EP0, and the system will fail basic enumeration. As no cause for this
  63 * has currently been found, we cannot support any large IN transfers for
  64 * EP0.
  65 */
  66#define EP0_MPS_LIMIT	64
  67
  68struct s3c_hsotg;
  69struct s3c_hsotg_req;
  70
/**
 * struct s3c_hsotg_ep - driver endpoint definition.
 * @ep: The gadget layer representation of the endpoint.
 * @queue: Queue of requests for this endpoint.
 * @parent: Reference back to the parent device structure.
 * @req: The current request that the endpoint is processing. This is
 *       used to indicate an request has been loaded onto the endpoint
 *       and has yet to be completed (maybe due to data move, or simply
 *	 awaiting an ack from the core all the data has been completed).
 * @debugfs: File entry for debugfs file for this endpoint.
 * @lock: State lock to protect contents of endpoint.
 * @dir_in: Set to true if this endpoint is of the IN direction, which
 *	    means that it is sending data to the Host.
 * @index: The index for the endpoint registers.
 * @name: The driver generated name for the endpoint; this is also the
 *	  name array passed to the USB core.
 * @halted: Set if the endpoint has been halted.
 * @periodic: Set if this is a periodic ep, such as Interrupt
 * @sent_zlp: Set if we've sent a zero-length packet.
 * @total_data: The total number of data bytes done.
 * @fifo_size: The size of the FIFO (for periodic IN endpoints)
 * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
 * @last_load: The offset of data for the last start of request.
 * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
 *
 * This is the driver's state for each registered endpoint, allowing it
 * to keep track of transactions that need doing. Each endpoint has a
 * lock to protect the state, to try and avoid using an overall lock
 * for the host controller as much as possible.
 *
 * For periodic IN endpoints, we have fifo_size and fifo_load to try
 * and keep track of the amount of data in the periodic FIFO for each
 * of these as we don't have a status register that tells us how much
 * is in each of them. (note, this may actually be useless information
 * as in shared-fifo mode periodic in acts like a single-frame packet
 * buffer than a fifo)
 */
struct s3c_hsotg_ep {
	struct usb_ep		ep;
	struct list_head	queue;
	struct s3c_hsotg	*parent;
	struct s3c_hsotg_req	*req;
	struct dentry		*debugfs;

	spinlock_t		lock;

	unsigned long		total_data;
	unsigned int		size_loaded;
	unsigned int		last_load;
	unsigned int		fifo_load;
	unsigned short		fifo_size;

	unsigned char		dir_in;
	unsigned char		index;

	unsigned int		halted:1;
	unsigned int		periodic:1;
	unsigned int		sent_zlp:1;

	char			name[10];
};
 132
/**
 * struct s3c_hsotg - driver state.
 * @dev: The parent device supplied to the probe function
 * @driver: USB gadget driver
 * @plat: The platform specific configuration data.
 * @regs: The memory area mapped for accessing registers.
 * @regs_res: The resource that was allocated when claiming register space.
 * @irq: The IRQ number we are using
 * @clk: Clock for the controller block (managed by code outside this
 *	 chunk — presumably enabled in probe; confirm against probe/remove).
 * @supplies: Definition of USB power supplies
 * @dedicated_fifos: Set if the hardware has dedicated IN-EP fifos.
 * @num_of_eps: Number of available EPs (excluding EP0)
 * @debug_root: root directory for debugfs.
 * @debug_file: main status file for debugfs.
 * @debug_fifo: FIFO status file for debugfs.
 * @ep0_reply: Request used for ep0 reply.
 * @ep0_buff: Buffer for EP0 reply data, if needed.
 * @ctrl_buff: Buffer for EP0 control requests.
 * @ctrl_req: Request for EP0 control packets.
 * @gadget: The gadget-layer representation of this controller.
 * @setup: NAK management for EP0 SETUP
 * @last_rst: Time of last reset
 * @eps: The endpoints being supplied to the gadget framework
 */
struct s3c_hsotg {
	struct device		 *dev;
	struct usb_gadget_driver *driver;
	struct s3c_hsotg_plat	 *plat;

	void __iomem		*regs;
	struct resource		*regs_res;
	int			irq;
	struct clk		*clk;

	struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)];

	unsigned int		dedicated_fifos:1;
	unsigned char           num_of_eps;

	struct dentry		*debug_root;
	struct dentry		*debug_file;
	struct dentry		*debug_fifo;

	struct usb_request	*ep0_reply;
	struct usb_request	*ctrl_req;
	u8			ep0_buff[8];
	u8			ctrl_buff[8];

	struct usb_gadget	gadget;
	unsigned int		setup;
	unsigned long           last_rst;
	struct s3c_hsotg_ep	*eps;
};
 184
/**
 * struct s3c_hsotg_req - data transfer request
 * @req: The USB gadget request; embedded so that our_req() can recover
 *	 the containing driver request with container_of().
 * @queue: The list of requests for the endpoint this is queued for.
 * @in_progress: Has already had size/packets written to core
 * @mapped: DMA buffer for this request has been mapped via dma_map_single().
 */
struct s3c_hsotg_req {
	struct usb_request	req;
	struct list_head	queue;
	unsigned char		in_progress;
	unsigned char		mapped;
};
 198
/* conversion functions */

/**
 * our_req - convert a gadget-layer request to the driver's request
 * @req: The gadget request embedded in a struct s3c_hsotg_req.
 */
static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
{
	return container_of(req, struct s3c_hsotg_req, req);
}
 204
/**
 * our_ep - convert a gadget-layer endpoint to the driver's endpoint
 * @ep: The gadget endpoint embedded in a struct s3c_hsotg_ep.
 */
static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct s3c_hsotg_ep, ep);
}
 209
/**
 * to_hsotg - convert a gadget to the owning controller state
 * @gadget: The gadget embedded in a struct s3c_hsotg.
 */
static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
{
	return container_of(gadget, struct s3c_hsotg, gadget);
}
 214
 215static inline void __orr32(void __iomem *ptr, u32 val)
 216{
 217	writel(readl(ptr) | val, ptr);
 218}
 219
 220static inline void __bic32(void __iomem *ptr, u32 val)
 221{
 222	writel(readl(ptr) & ~val, ptr);
 223}
 224
/* forward declaration of functions */
 226static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
 227
/**
 * using_dma - return the DMA status of the driver.
 * @hsotg: The driver state.
 *
 * Return true if we're using DMA.
 *
 * Currently, we have the DMA support code worked into everywhere
 * that needs it, but the AMBA DMA implementation in the hardware can
 * only DMA from 32bit aligned addresses. This means that gadgets such
 * as the CDC Ethernet cannot work as they often pass packets which are
 * not 32bit aligned.
 *
 * Unfortunately the choice to use DMA or not is global to the controller
 * and seems to be only settable when the controller is being put through
 * a core reset. This means we either need to fix the gadgets to take
 * account of DMA alignment, or add bounce buffers (yuerk).
 *
 * Until this issue is sorted out, we always return 'false'.
 *
 * Return: always false until the alignment issue is resolved; all
 * callers therefore currently take the PIO/slave-mode paths.
 */
static inline bool using_dma(struct s3c_hsotg *hsotg)
{
	return false;	/* support is not complete */
}
 251
 252/**
 253 * s3c_hsotg_en_gsint - enable one or more of the general interrupt
 254 * @hsotg: The device state
 255 * @ints: A bitmask of the interrupts to enable
 256 */
 257static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
 258{
 259	u32 gsintmsk = readl(hsotg->regs + GINTMSK);
 260	u32 new_gsintmsk;
 261
 262	new_gsintmsk = gsintmsk | ints;
 263
 264	if (new_gsintmsk != gsintmsk) {
 265		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
 266		writel(new_gsintmsk, hsotg->regs + GINTMSK);
 267	}
 268}
 269
 270/**
 271 * s3c_hsotg_disable_gsint - disable one or more of the general interrupt
 272 * @hsotg: The device state
 273 * @ints: A bitmask of the interrupts to enable
 274 */
 275static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
 276{
 277	u32 gsintmsk = readl(hsotg->regs + GINTMSK);
 278	u32 new_gsintmsk;
 279
 280	new_gsintmsk = gsintmsk & ~ints;
 281
 282	if (new_gsintmsk != gsintmsk)
 283		writel(new_gsintmsk, hsotg->regs + GINTMSK);
 284}
 285
 286/**
 287 * s3c_hsotg_ctrl_epint - enable/disable an endpoint irq
 288 * @hsotg: The device state
 289 * @ep: The endpoint index
 290 * @dir_in: True if direction is in.
 291 * @en: The enable value, true to enable
 292 *
 293 * Set or clear the mask for an individual endpoint's interrupt
 294 * request.
 295 */
 296static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
 297				 unsigned int ep, unsigned int dir_in,
 298				 unsigned int en)
 299{
 300	unsigned long flags;
 301	u32 bit = 1 << ep;
 302	u32 daint;
 303
 304	if (!dir_in)
 305		bit <<= 16;
 306
 307	local_irq_save(flags);
 308	daint = readl(hsotg->regs + DAINTMSK);
 309	if (en)
 310		daint |= bit;
 311	else
 312		daint &= ~bit;
 313	writel(daint, hsotg->regs + DAINTMSK);
 314	local_irq_restore(flags);
 315}
 316
 317/**
 318 * s3c_hsotg_init_fifo - initialise non-periodic FIFOs
 319 * @hsotg: The device instance.
 320 */
 321static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
 322{
 323	unsigned int ep;
 324	unsigned int addr;
 325	unsigned int size;
 326	int timeout;
 327	u32 val;
 328
 329	/* set FIFO sizes to 2048/1024 */
 330
 331	writel(2048, hsotg->regs + GRXFSIZ);
 332	writel(GNPTXFSIZ_NPTxFStAddr(2048) |
 333	       GNPTXFSIZ_NPTxFDep(1024),
 334	       hsotg->regs + GNPTXFSIZ);
 335
 336	/*
 337	 * arange all the rest of the TX FIFOs, as some versions of this
 338	 * block have overlapping default addresses. This also ensures
 339	 * that if the settings have been changed, then they are set to
 340	 * known values.
 341	 */
 342
 343	/* start at the end of the GNPTXFSIZ, rounded up */
 344	addr = 2048 + 1024;
 345	size = 768;
 346
 347	/*
 348	 * currently we allocate TX FIFOs for all possible endpoints,
 349	 * and assume that they are all the same size.
 350	 */
 351
 352	for (ep = 1; ep <= 15; ep++) {
 353		val = addr;
 354		val |= size << DPTXFSIZn_DPTxFSize_SHIFT;
 355		addr += size;
 356
 357		writel(val, hsotg->regs + DPTXFSIZn(ep));
 358	}
 359
 360	/*
 361	 * according to p428 of the design guide, we need to ensure that
 362	 * all fifos are flushed before continuing
 363	 */
 364
 365	writel(GRSTCTL_TxFNum(0x10) | GRSTCTL_TxFFlsh |
 366	       GRSTCTL_RxFFlsh, hsotg->regs + GRSTCTL);
 367
 368	/* wait until the fifos are both flushed */
 369	timeout = 100;
 370	while (1) {
 371		val = readl(hsotg->regs + GRSTCTL);
 372
 373		if ((val & (GRSTCTL_TxFFlsh | GRSTCTL_RxFFlsh)) == 0)
 374			break;
 375
 376		if (--timeout == 0) {
 377			dev_err(hsotg->dev,
 378				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
 379				__func__, val);
 380		}
 381
 382		udelay(1);
 383	}
 384
 385	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
 386}
 387
 388/**
 389 * @ep: USB endpoint to allocate request for.
 390 * @flags: Allocation flags
 391 *
 392 * Allocate a new USB request structure appropriate for the specified endpoint
 393 */
 394static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
 395						      gfp_t flags)
 396{
 397	struct s3c_hsotg_req *req;
 398
 399	req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
 400	if (!req)
 401		return NULL;
 402
 403	INIT_LIST_HEAD(&req->queue);
 404
 405	req->req.dma = DMA_ADDR_INVALID;
 406	return &req->req;
 407}
 408
/**
 * is_ep_periodic - return true if the endpoint is in periodic mode.
 * @hs_ep: The endpoint to query.
 *
 * Returns true if the endpoint is in periodic mode, meaning it is being
 * used for an Interrupt or ISO transfer. The FIFO-write code uses this
 * to pick between the periodic and non-periodic TxFIFO accounting.
 */
static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
{
	return hs_ep->periodic;
}
 420
 421/**
 422 * s3c_hsotg_unmap_dma - unmap the DMA memory being used for the request
 423 * @hsotg: The device state.
 424 * @hs_ep: The endpoint for the request
 425 * @hs_req: The request being processed.
 426 *
 427 * This is the reverse of s3c_hsotg_map_dma(), called for the completion
 428 * of a request to ensure the buffer is ready for access by the caller.
 429 */
 430static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
 431				struct s3c_hsotg_ep *hs_ep,
 432				struct s3c_hsotg_req *hs_req)
 433{
 434	struct usb_request *req = &hs_req->req;
 435	enum dma_data_direction dir;
 436
 437	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 438
 439	/* ignore this if we're not moving any data */
 440	if (hs_req->req.length == 0)
 441		return;
 442
 443	if (hs_req->mapped) {
 444		/* we mapped this, so unmap and remove the dma */
 445
 446		dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
 447
 448		req->dma = DMA_ADDR_INVALID;
 449		hs_req->mapped = 0;
 450	} else {
 451		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
 452	}
 453}
 454
/**
 * s3c_hsotg_write_fifo - write packet Data to the TxFIFO
 * @hsotg: The controller state.
 * @hs_ep: The endpoint we're going to write for.
 * @hs_req: The request to write data for.
 *
 * This is called when the TxFIFO has some space in it to hold a new
 * transmission and we have something to give it. The actual setup of
 * the data size is done elsewhere, so all we have to do is to actually
 * write the data.
 *
 * The return value is zero if there is more space (or nothing was done)
 * otherwise -ENOSPC is returned if the FIFO space was used up.
 *
 * This routine is only needed for PIO
 */
static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req)
{
	bool periodic = is_ep_periodic(hs_ep);
	u32 gnptxsts = readl(hsotg->regs + GNPTXSTS);
	int buf_pos = hs_req->req.actual;
	int to_write = hs_ep->size_loaded;
	void *data;
	int can_write;
	int pkt_round;

	/*
	 * size_loaded is the whole programmed transfer; subtract what has
	 * already been pushed since the last load to get what remains.
	 */
	to_write -= (buf_pos - hs_ep->last_load);

	/* if there's nothing to write, get out early */
	if (to_write == 0)
		return 0;

	if (periodic && !hsotg->dedicated_fifos) {
		u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
		int size_left;
		int size_done;

		/*
		 * work out how much data was loaded so we can calculate
		 * how much data is left in the fifo.
		 */

		size_left = DxEPTSIZ_XferSize_GET(epsize);

		/*
		 * if shared fifo, we cannot write anything until the
		 * previous data has been completely sent.
		 */
		if (hs_ep->fifo_load != 0) {
			s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
			return -ENOSPC;
		}

		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
			__func__, size_left,
			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);

		/* how much of the data has moved */
		size_done = hs_ep->size_loaded - size_left;

		/* how much data is left in the fifo */
		can_write = hs_ep->fifo_load - size_done;
		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
			__func__, can_write);

		can_write = hs_ep->fifo_size - can_write;
		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
			__func__, can_write);

		if (can_write <= 0) {
			s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
			return -ENOSPC;
		}
	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
		/* dedicated FIFO: DTXFSTS reports free space in 32bit words */
		can_write = readl(hsotg->regs + DTXFSTS(hs_ep->index));

		can_write &= 0xffff;
		can_write *= 4;
	} else {
		if (GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
			dev_dbg(hsotg->dev,
				"%s: no queue slots available (0x%08x)\n",
				__func__, gnptxsts);

			s3c_hsotg_en_gsint(hsotg, GINTSTS_NPTxFEmp);
			return -ENOSPC;
		}

		can_write = GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
		can_write *= 4;	/* fifo size is in 32bit quantities. */
	}

	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
		 __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);

	/*
	 * limit to 512 bytes of data, it seems at least on the non-periodic
	 * FIFO, requests of >512 cause the endpoint to get stuck with a
	 * fragment of the end of the transfer in it.
	 */
	if (can_write > 512)
		can_write = 512;

	/*
	 * limit the write to one max-packet size worth of data, but allow
	 * the transfer to return that it did not run out of fifo space
	 * doing it.
	 */
	if (to_write > hs_ep->ep.maxpacket) {
		to_write = hs_ep->ep.maxpacket;

		s3c_hsotg_en_gsint(hsotg,
				   periodic ? GINTSTS_PTxFEmp :
				   GINTSTS_NPTxFEmp);
	}

	/* see if we can write data */

	if (to_write > can_write) {
		to_write = can_write;
		pkt_round = to_write % hs_ep->ep.maxpacket;

		/*
		 * Round the write down to an
		 * exact number of packets.
		 *
		 * Note, we do not currently check to see if we can ever
		 * write a full packet or not to the FIFO.
		 */

		if (pkt_round)
			to_write -= pkt_round;

		/*
		 * enable correct FIFO interrupt to alert us when there
		 * is more room left.
		 */

		s3c_hsotg_en_gsint(hsotg,
				   periodic ? GINTSTS_PTxFEmp :
				   GINTSTS_NPTxFEmp);
	}

	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
		 to_write, hs_req->req.length, can_write, buf_pos);

	if (to_write <= 0)
		return -ENOSPC;

	hs_req->req.actual = buf_pos + to_write;
	hs_ep->total_data += to_write;

	if (periodic)
		hs_ep->fifo_load += to_write;

	/* convert byte count to 32bit words for the FIFO write */
	to_write = DIV_ROUND_UP(to_write, 4);
	data = hs_req->req.buf + buf_pos;

	writesl(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);

	return (to_write >= can_write) ? -ENOSPC : 0;
}
 619
 620/**
 621 * get_ep_limit - get the maximum data legnth for this endpoint
 622 * @hs_ep: The endpoint
 623 *
 624 * Return the maximum data that can be queued in one go on a given endpoint
 625 * so that transfers that are too long can be split.
 626 */
 627static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
 628{
 629	int index = hs_ep->index;
 630	unsigned maxsize;
 631	unsigned maxpkt;
 632
 633	if (index != 0) {
 634		maxsize = DxEPTSIZ_XferSize_LIMIT + 1;
 635		maxpkt = DxEPTSIZ_PktCnt_LIMIT + 1;
 636	} else {
 637		maxsize = 64+64;
 638		if (hs_ep->dir_in)
 639			maxpkt = DIEPTSIZ0_PktCnt_LIMIT + 1;
 640		else
 641			maxpkt = 2;
 642	}
 643
 644	/* we made the constant loading easier above by using +1 */
 645	maxpkt--;
 646	maxsize--;
 647
 648	/*
 649	 * constrain by packet count if maxpkts*pktsize is greater
 650	 * than the length register size.
 651	 */
 652
 653	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
 654		maxsize = maxpkt * hs_ep->ep.maxpacket;
 655
 656	return maxsize;
 657}
 658
/**
 * s3c_hsotg_start_req - start a USB request from an endpoint's queue
 * @hsotg: The controller state.
 * @hs_ep: The endpoint to process a request for
 * @hs_req: The request to start.
 * @continuing: True if we are doing more for the current request.
 *
 * Start the given request running by setting the endpoint registers
 * appropriately, and writing any data to the FIFOs.
 */
static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
				struct s3c_hsotg_ep *hs_ep,
				struct s3c_hsotg_req *hs_req,
				bool continuing)
{
	struct usb_request *ureq = &hs_req->req;
	int index = hs_ep->index;
	int dir_in = hs_ep->dir_in;
	u32 epctrl_reg;
	u32 epsize_reg;
	u32 epsize;
	u32 ctrl;
	unsigned length;
	unsigned packets;
	unsigned maxreq;

	/* sanity-check request state: only ep0 may be (re)started freely */
	if (index != 0) {
		if (hs_ep->req && !continuing) {
			dev_err(hsotg->dev, "%s: active request\n", __func__);
			WARN_ON(1);
			return;
		} else if (hs_ep->req != hs_req && continuing) {
			dev_err(hsotg->dev,
				"%s: continue different req\n", __func__);
			WARN_ON(1);
			return;
		}
	}

	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
		__func__, readl(hsotg->regs + epctrl_reg), index,
		hs_ep->dir_in ? "in" : "out");

	/* If endpoint is stalled, we will restart request later */
	ctrl = readl(hsotg->regs + epctrl_reg);

	if (ctrl & DxEPCTL_Stall) {
		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
		return;
	}

	length = ureq->length - ureq->actual;
	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
		ureq->length, ureq->actual);
	/* NOTE(review): deliberately disabled debug output, kept so it can
	 * be re-enabled by hand during bring-up */
	if (0)
		dev_dbg(hsotg->dev,
			"REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
			ureq->buf, length, ureq->dma,
			ureq->no_interrupt, ureq->zero, ureq->short_not_ok);

	/* clamp the length to what the endpoint can take in one go */
	maxreq = get_ep_limit(hs_ep);
	if (length > maxreq) {
		int round = maxreq % hs_ep->ep.maxpacket;

		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
			__func__, length, maxreq, round);

		/* round down to multiple of packets */
		if (round)
			maxreq -= round;

		length = maxreq;
	}

	if (length)
		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
	else
		packets = 1;	/* send one packet if length is zero. */

	if (dir_in && index != 0)
		epsize = DxEPTSIZ_MC(1);
	else
		epsize = 0;

	if (index != 0 && ureq->zero) {
		/*
		 * test for the packets being exactly right for the
		 * transfer
		 */

		if (length == (packets * hs_ep->ep.maxpacket))
			packets++;
	}

	epsize |= DxEPTSIZ_PktCnt(packets);
	epsize |= DxEPTSIZ_XferSize(length);

	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
		__func__, packets, length, ureq->length, epsize, epsize_reg);

	/* store the request as the current one we're doing */
	hs_ep->req = hs_req;

	/* write size / packets */
	writel(epsize, hsotg->regs + epsize_reg);

	if (using_dma(hsotg) && !continuing) {
		unsigned int dma_reg;

		/*
		 * write DMA address to control register, buffer already
		 * synced by s3c_hsotg_ep_queue().
		 */

		dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
		writel(ureq->dma, hsotg->regs + dma_reg);

		dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
			__func__, ureq->dma, dma_reg);
	}

	ctrl |= DxEPCTL_EPEna;	/* ensure ep enabled */
	ctrl |= DxEPCTL_USBActEp;

	dev_dbg(hsotg->dev, "setup req:%d\n", hsotg->setup);

	/* For Setup request do not clear NAK */
	if (hsotg->setup && index == 0)
		hsotg->setup = 0;
	else
		ctrl |= DxEPCTL_CNAK;	/* clear NAK set by core */


	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
	writel(ctrl, hsotg->regs + epctrl_reg);

	/*
	 * set these, it seems that DMA support increments past the end
	 * of the packet buffer so we need to calculate the length from
	 * this information.
	 */
	hs_ep->size_loaded = length;
	hs_ep->last_load = ureq->actual;

	if (dir_in && !using_dma(hsotg)) {
		/* set these anyway, we may need them for non-periodic in */
		hs_ep->fifo_load = 0;

		s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
	}

	/*
	 * clear the INTknTXFEmpMsk when we start request, more as a aide
	 * to debugging to see what is going on.
	 */
	if (dir_in)
		writel(DIEPMSK_INTknTXFEmpMsk,
		       hsotg->regs + DIEPINT(index));

	/*
	 * Note, trying to clear the NAK here causes problems with transmit
	 * on the S3C6400 ending up with the TXFIFO becoming full.
	 */

	/* check ep is enabled */
	if (!(readl(hsotg->regs + epctrl_reg) & DxEPCTL_EPEna))
		dev_warn(hsotg->dev,
			 "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
			 index, readl(hsotg->regs + epctrl_reg));

	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
		__func__, readl(hsotg->regs + epctrl_reg));
}
 835
 836/**
 837 * s3c_hsotg_map_dma - map the DMA memory being used for the request
 838 * @hsotg: The device state.
 839 * @hs_ep: The endpoint the request is on.
 840 * @req: The request being processed.
 841 *
 842 * We've been asked to queue a request, so ensure that the memory buffer
 843 * is correctly setup for DMA. If we've been passed an extant DMA address
 844 * then ensure the buffer has been synced to memory. If our buffer has no
 845 * DMA memory, then we map the memory and mark our request to allow us to
 846 * cleanup on completion.
 847 */
 848static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
 849			     struct s3c_hsotg_ep *hs_ep,
 850			     struct usb_request *req)
 851{
 852	enum dma_data_direction dir;
 853	struct s3c_hsotg_req *hs_req = our_req(req);
 854
 855	dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 856
 857	/* if the length is zero, ignore the DMA data */
 858	if (hs_req->req.length == 0)
 859		return 0;
 860
 861	if (req->dma == DMA_ADDR_INVALID) {
 862		dma_addr_t dma;
 863
 864		dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
 865
 866		if (unlikely(dma_mapping_error(hsotg->dev, dma)))
 867			goto dma_error;
 868
 869		if (dma & 3) {
 870			dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
 871				__func__);
 872
 873			dma_unmap_single(hsotg->dev, dma, req->length, dir);
 874			return -EINVAL;
 875		}
 876
 877		hs_req->mapped = 1;
 878		req->dma = dma;
 879	} else {
 880		dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
 881		hs_req->mapped = 0;
 882	}
 883
 884	return 0;
 885
 886dma_error:
 887	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
 888		__func__, req->buf, req->length);
 889
 890	return -EIO;
 891}
 892
 893static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
 894			      gfp_t gfp_flags)
 895{
 896	struct s3c_hsotg_req *hs_req = our_req(req);
 897	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
 898	struct s3c_hsotg *hs = hs_ep->parent;
 899	unsigned long irqflags;
 900	bool first;
 901
 902	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
 903		ep->name, req, req->length, req->buf, req->no_interrupt,
 904		req->zero, req->short_not_ok);
 905
 906	/* initialise status of the request */
 907	INIT_LIST_HEAD(&hs_req->queue);
 908	req->actual = 0;
 909	req->status = -EINPROGRESS;
 910
 911	/* if we're using DMA, sync the buffers as necessary */
 912	if (using_dma(hs)) {
 913		int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
 914		if (ret)
 915			return ret;
 916	}
 917
 918	spin_lock_irqsave(&hs_ep->lock, irqflags);
 919
 920	first = list_empty(&hs_ep->queue);
 921	list_add_tail(&hs_req->queue, &hs_ep->queue);
 922
 923	if (first)
 924		s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
 925
 926	spin_unlock_irqrestore(&hs_ep->lock, irqflags);
 927
 928	return 0;
 929}
 930
 931static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
 932				      struct usb_request *req)
 933{
 934	struct s3c_hsotg_req *hs_req = our_req(req);
 935
 936	kfree(hs_req);
 937}
 938
 939/**
 940 * s3c_hsotg_complete_oursetup - setup completion callback
 941 * @ep: The endpoint the request was on.
 942 * @req: The request completed.
 943 *
 944 * Called on completion of any requests the driver itself
 945 * submitted that need cleaning up.
 946 */
 947static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
 948					struct usb_request *req)
 949{
 950	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
 951	struct s3c_hsotg *hsotg = hs_ep->parent;
 952
 953	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
 954
 955	s3c_hsotg_ep_free_request(ep, req);
 956}
 957
 958/**
 959 * ep_from_windex - convert control wIndex value to endpoint
 960 * @hsotg: The driver state.
 961 * @windex: The control request wIndex field (in host order).
 962 *
 963 * Convert the given wIndex into a pointer to an driver endpoint
 964 * structure, or return NULL if it is not a valid endpoint.
 965 */
 966static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
 967					   u32 windex)
 968{
 969	struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
 970	int dir = (windex & USB_DIR_IN) ? 1 : 0;
 971	int idx = windex & 0x7F;
 972
 973	if (windex >= 0x100)
 974		return NULL;
 975
 976	if (idx > hsotg->num_of_eps)
 977		return NULL;
 978
 979	if (idx && ep->dir_in != dir)
 980		return NULL;
 981
 982	return ep;
 983}
 984
 985/**
 986 * s3c_hsotg_send_reply - send reply to control request
 987 * @hsotg: The device state
 988 * @ep: Endpoint 0
 989 * @buff: Buffer for request
 990 * @length: Length of reply.
 991 *
 992 * Create a request and queue it on the given endpoint. This is useful as
 993 * an internal method of sending replies to certain control requests, etc.
 994 */
 995static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
 996				struct s3c_hsotg_ep *ep,
 997				void *buff,
 998				int length)
 999{
1000	struct usb_request *req;
1001	int ret;
1002
1003	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
1004
1005	req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
1006	hsotg->ep0_reply = req;
1007	if (!req) {
1008		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
1009		return -ENOMEM;
1010	}
1011
1012	req->buf = hsotg->ep0_buff;
1013	req->length = length;
1014	req->zero = 1; /* always do zero-length final transfer */
1015	req->complete = s3c_hsotg_complete_oursetup;
1016
1017	if (length)
1018		memcpy(req->buf, buff, length);
1019	else
1020		ep->sent_zlp = 1;
1021
1022	ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
1023	if (ret) {
1024		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
1025		return ret;
1026	}
1027
1028	return 0;
1029}
1030
1031/**
1032 * s3c_hsotg_process_req_status - process request GET_STATUS
1033 * @hsotg: The device state
1034 * @ctrl: USB control request
1035 */
1036static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
1037					struct usb_ctrlrequest *ctrl)
1038{
1039	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1040	struct s3c_hsotg_ep *ep;
1041	__le16 reply;
1042	int ret;
1043
1044	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
1045
1046	if (!ep0->dir_in) {
1047		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
1048		return -EINVAL;
1049	}
1050
1051	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1052	case USB_RECIP_DEVICE:
1053		reply = cpu_to_le16(0); /* bit 0 => self powered,
1054					 * bit 1 => remote wakeup */
1055		break;
1056
1057	case USB_RECIP_INTERFACE:
1058		/* currently, the data result should be zero */
1059		reply = cpu_to_le16(0);
1060		break;
1061
1062	case USB_RECIP_ENDPOINT:
1063		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1064		if (!ep)
1065			return -ENOENT;
1066
1067		reply = cpu_to_le16(ep->halted ? 1 : 0);
1068		break;
1069
1070	default:
1071		return 0;
1072	}
1073
1074	if (le16_to_cpu(ctrl->wLength) != 2)
1075		return -EINVAL;
1076
1077	ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
1078	if (ret) {
1079		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1080		return ret;
1081	}
1082
1083	return 1;
1084}
1085
1086static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
1087
1088/**
1089 * get_ep_head - return the first request on the endpoint
1090 * @hs_ep: The controller endpoint to get
1091 *
1092 * Get the first request on the endpoint.
1093 */
1094static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
1095{
1096	if (list_empty(&hs_ep->queue))
1097		return NULL;
1098
1099	return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
1100}
1101
1102/**
1103 * s3c_hsotg_process_req_featire - process request {SET,CLEAR}_FEATURE
1104 * @hsotg: The device state
1105 * @ctrl: USB control request
1106 */
1107static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
1108					 struct usb_ctrlrequest *ctrl)
1109{
1110	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1111	struct s3c_hsotg_req *hs_req;
1112	bool restart;
1113	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
1114	struct s3c_hsotg_ep *ep;
1115	int ret;
1116
1117	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
1118		__func__, set ? "SET" : "CLEAR");
1119
1120	if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
1121		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1122		if (!ep) {
1123			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
1124				__func__, le16_to_cpu(ctrl->wIndex));
1125			return -ENOENT;
1126		}
1127
1128		switch (le16_to_cpu(ctrl->wValue)) {
1129		case USB_ENDPOINT_HALT:
1130			s3c_hsotg_ep_sethalt(&ep->ep, set);
1131
1132			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1133			if (ret) {
1134				dev_err(hsotg->dev,
1135					"%s: failed to send reply\n", __func__);
1136				return ret;
1137			}
1138
1139			if (!set) {
1140				/*
1141				 * If we have request in progress,
1142				 * then complete it
1143				 */
1144				if (ep->req) {
1145					hs_req = ep->req;
1146					ep->req = NULL;
1147					list_del_init(&hs_req->queue);
1148					hs_req->req.complete(&ep->ep,
1149							     &hs_req->req);
1150				}
1151
1152				/* If we have pending request, then start it */
1153				restart = !list_empty(&ep->queue);
1154				if (restart) {
1155					hs_req = get_ep_head(ep);
1156					s3c_hsotg_start_req(hsotg, ep,
1157							    hs_req, false);
1158				}
1159			}
1160
1161			break;
1162
1163		default:
1164			return -ENOENT;
1165		}
1166	} else
1167		return -ENOENT;  /* currently only deal with endpoint */
1168
1169	return 1;
1170}
1171
1172/**
1173 * s3c_hsotg_process_control - process a control request
1174 * @hsotg: The device state
1175 * @ctrl: The control request received
1176 *
1177 * The controller has received the SETUP phase of a control request, and
1178 * needs to work out what to do next (and whether to pass it on to the
1179 * gadget driver).
1180 */
1181static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1182				      struct usb_ctrlrequest *ctrl)
1183{
1184	struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1185	int ret = 0;
1186	u32 dcfg;
1187
1188	ep0->sent_zlp = 0;
1189
1190	dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
1191		 ctrl->bRequest, ctrl->bRequestType,
1192		 ctrl->wValue, ctrl->wLength);
1193
1194	/*
1195	 * record the direction of the request, for later use when enquing
1196	 * packets onto EP0.
1197	 */
1198
1199	ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
1200	dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
1201
1202	/*
1203	 * if we've no data with this request, then the last part of the
1204	 * transaction is going to implicitly be IN.
1205	 */
1206	if (ctrl->wLength == 0)
1207		ep0->dir_in = 1;
1208
1209	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1210		switch (ctrl->bRequest) {
1211		case USB_REQ_SET_ADDRESS:
1212			dcfg = readl(hsotg->regs + DCFG);
1213			dcfg &= ~DCFG_DevAddr_MASK;
1214			dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT;
1215			writel(dcfg, hsotg->regs + DCFG);
1216
1217			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1218
1219			ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1220			return;
1221
1222		case USB_REQ_GET_STATUS:
1223			ret = s3c_hsotg_process_req_status(hsotg, ctrl);
1224			break;
1225
1226		case USB_REQ_CLEAR_FEATURE:
1227		case USB_REQ_SET_FEATURE:
1228			ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
1229			break;
1230		}
1231	}
1232
1233	/* as a fallback, try delivering it to the driver to deal with */
1234
1235	if (ret == 0 && hsotg->driver) {
1236		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1237		if (ret < 0)
1238			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1239	}
1240
1241	/*
1242	 * the request is either unhandlable, or is not formatted correctly
1243	 * so respond with a STALL for the status stage to indicate failure.
1244	 */
1245
1246	if (ret < 0) {
1247		u32 reg;
1248		u32 ctrl;
1249
1250		dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1251		reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1252
1253		/*
1254		 * DxEPCTL_Stall will be cleared by EP once it has
1255		 * taken effect, so no need to clear later.
1256		 */
1257
1258		ctrl = readl(hsotg->regs + reg);
1259		ctrl |= DxEPCTL_Stall;
1260		ctrl |= DxEPCTL_CNAK;
1261		writel(ctrl, hsotg->regs + reg);
1262
1263		dev_dbg(hsotg->dev,
1264			"written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
1265			ctrl, reg, readl(hsotg->regs + reg));
1266
1267		/*
1268		 * don't believe we need to anything more to get the EP
1269		 * to reply with a STALL packet
1270		 */
1271	}
1272}
1273
1274static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
1275
1276/**
1277 * s3c_hsotg_complete_setup - completion of a setup transfer
1278 * @ep: The endpoint the request was on.
1279 * @req: The request completed.
1280 *
1281 * Called on completion of any requests the driver itself submitted for
1282 * EP0 setup packets
1283 */
1284static void s3c_hsotg_complete_setup(struct usb_ep *ep,
1285				     struct usb_request *req)
1286{
1287	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
1288	struct s3c_hsotg *hsotg = hs_ep->parent;
1289
1290	if (req->status < 0) {
1291		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
1292		return;
1293	}
1294
1295	if (req->actual == 0)
1296		s3c_hsotg_enqueue_setup(hsotg);
1297	else
1298		s3c_hsotg_process_control(hsotg, req->buf);
1299}
1300
1301/**
1302 * s3c_hsotg_enqueue_setup - start a request for EP0 packets
1303 * @hsotg: The device state.
1304 *
1305 * Enqueue a request on EP0 if necessary to received any SETUP packets
1306 * received from the host.
1307 */
1308static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
1309{
1310	struct usb_request *req = hsotg->ctrl_req;
1311	struct s3c_hsotg_req *hs_req = our_req(req);
1312	int ret;
1313
1314	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
1315
1316	req->zero = 0;
1317	req->length = 8;
1318	req->buf = hsotg->ctrl_buff;
1319	req->complete = s3c_hsotg_complete_setup;
1320
1321	if (!list_empty(&hs_req->queue)) {
1322		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
1323		return;
1324	}
1325
1326	hsotg->eps[0].dir_in = 0;
1327
1328	ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
1329	if (ret < 0) {
1330		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
1331		/*
1332		 * Don't think there's much we can do other than watch the
1333		 * driver fail.
1334		 */
1335	}
1336}
1337
1338/**
1339 * s3c_hsotg_complete_request - complete a request given to us
1340 * @hsotg: The device state.
1341 * @hs_ep: The endpoint the request was on.
1342 * @hs_req: The request to complete.
1343 * @result: The result code (0 => Ok, otherwise errno)
1344 *
1345 * The given request has finished, so call the necessary completion
1346 * if it has one and then look to see if we can start a new request
1347 * on the endpoint.
1348 *
1349 * Note, expects the ep to already be locked as appropriate.
1350 */
1351static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
1352				       struct s3c_hsotg_ep *hs_ep,
1353				       struct s3c_hsotg_req *hs_req,
1354				       int result)
1355{
1356	bool restart;
1357
1358	if (!hs_req) {
1359		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
1360		return;
1361	}
1362
1363	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
1364		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
1365
1366	/*
1367	 * only replace the status if we've not already set an error
1368	 * from a previous transaction
1369	 */
1370
1371	if (hs_req->req.status == -EINPROGRESS)
1372		hs_req->req.status = result;
1373
1374	hs_ep->req = NULL;
1375	list_del_init(&hs_req->queue);
1376
1377	if (using_dma(hsotg))
1378		s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
1379
1380	/*
1381	 * call the complete request with the locks off, just in case the
1382	 * request tries to queue more work for this endpoint.
1383	 */
1384
1385	if (hs_req->req.complete) {
1386		spin_unlock(&hs_ep->lock);
1387		hs_req->req.complete(&hs_ep->ep, &hs_req->req);
1388		spin_lock(&hs_ep->lock);
1389	}
1390
1391	/*
1392	 * Look to see if there is anything else to do. Note, the completion
1393	 * of the previous request may have caused a new request to be started
1394	 * so be careful when doing this.
1395	 */
1396
1397	if (!hs_ep->req && result >= 0) {
1398		restart = !list_empty(&hs_ep->queue);
1399		if (restart) {
1400			hs_req = get_ep_head(hs_ep);
1401			s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1402		}
1403	}
1404}
1405
1406/**
1407 * s3c_hsotg_complete_request_lock - complete a request given to us (locked)
1408 * @hsotg: The device state.
1409 * @hs_ep: The endpoint the request was on.
1410 * @hs_req: The request to complete.
1411 * @result: The result code (0 => Ok, otherwise errno)
1412 *
1413 * See s3c_hsotg_complete_request(), but called with the endpoint's
1414 * lock held.
1415 */
1416static void s3c_hsotg_complete_request_lock(struct s3c_hsotg *hsotg,
1417					    struct s3c_hsotg_ep *hs_ep,
1418					    struct s3c_hsotg_req *hs_req,
1419					    int result)
1420{
1421	unsigned long flags;
1422
1423	spin_lock_irqsave(&hs_ep->lock, flags);
1424	s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
1425	spin_unlock_irqrestore(&hs_ep->lock, flags);
1426}
1427
1428/**
1429 * s3c_hsotg_rx_data - receive data from the FIFO for an endpoint
1430 * @hsotg: The device state.
1431 * @ep_idx: The endpoint index for the data
1432 * @size: The size of data in the fifo, in bytes
1433 *
1434 * The FIFO status shows there is data to read from the FIFO for a given
1435 * endpoint, so sort out whether we need to read the data into a request
1436 * that has been made for that endpoint.
1437 */
1438static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
1439{
1440	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
1441	struct s3c_hsotg_req *hs_req = hs_ep->req;
1442	void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
1443	int to_read;
1444	int max_req;
1445	int read_ptr;
1446
1447	if (!hs_req) {
1448		u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
1449		int ptr;
1450
1451		dev_warn(hsotg->dev,
1452			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
1453			 __func__, size, ep_idx, epctl);
1454
1455		/* dump the data from the FIFO, we've nothing we can do */
1456		for (ptr = 0; ptr < size; ptr += 4)
1457			(void)readl(fifo);
1458
1459		return;
1460	}
1461
1462	spin_lock(&hs_ep->lock);
1463
1464	to_read = size;
1465	read_ptr = hs_req->req.actual;
1466	max_req = hs_req->req.length - read_ptr;
1467
1468	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
1469		__func__, to_read, max_req, read_ptr, hs_req->req.length);
1470
1471	if (to_read > max_req) {
1472		/*
1473		 * more data appeared than we where willing
1474		 * to deal with in this request.
1475		 */
1476
1477		/* currently we don't deal this */
1478		WARN_ON_ONCE(1);
1479	}
1480
1481	hs_ep->total_data += to_read;
1482	hs_req->req.actual += to_read;
1483	to_read = DIV_ROUND_UP(to_read, 4);
1484
1485	/*
1486	 * note, we might over-write the buffer end by 3 bytes depending on
1487	 * alignment of the data.
1488	 */
1489	readsl(fifo, hs_req->req.buf + read_ptr, to_read);
1490
1491	spin_unlock(&hs_ep->lock);
1492}
1493
1494/**
1495 * s3c_hsotg_send_zlp - send zero-length packet on control endpoint
1496 * @hsotg: The device instance
1497 * @req: The request currently on this endpoint
1498 *
1499 * Generate a zero-length IN packet request for terminating a SETUP
1500 * transaction.
1501 *
1502 * Note, since we don't write any data to the TxFIFO, then it is
1503 * currently believed that we do not need to wait for any space in
1504 * the TxFIFO.
1505 */
1506static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
1507			       struct s3c_hsotg_req *req)
1508{
1509	u32 ctrl;
1510
1511	if (!req) {
1512		dev_warn(hsotg->dev, "%s: no request?\n", __func__);
1513		return;
1514	}
1515
1516	if (req->req.length == 0) {
1517		hsotg->eps[0].sent_zlp = 1;
1518		s3c_hsotg_enqueue_setup(hsotg);
1519		return;
1520	}
1521
1522	hsotg->eps[0].dir_in = 1;
1523	hsotg->eps[0].sent_zlp = 1;
1524
1525	dev_dbg(hsotg->dev, "sending zero-length packet\n");
1526
1527	/* issue a zero-sized packet to terminate this */
1528	writel(DxEPTSIZ_MC(1) | DxEPTSIZ_PktCnt(1) |
1529	       DxEPTSIZ_XferSize(0), hsotg->regs + DIEPTSIZ(0));
1530
1531	ctrl = readl(hsotg->regs + DIEPCTL0);
1532	ctrl |= DxEPCTL_CNAK;  /* clear NAK set by core */
1533	ctrl |= DxEPCTL_EPEna; /* ensure ep enabled */
1534	ctrl |= DxEPCTL_USBActEp;
1535	writel(ctrl, hsotg->regs + DIEPCTL0);
1536}
1537
1538/**
1539 * s3c_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
1540 * @hsotg: The device instance
1541 * @epnum: The endpoint received from
1542 * @was_setup: Set if processing a SetupDone event.
1543 *
1544 * The RXFIFO has delivered an OutDone event, which means that the data
1545 * transfer for an OUT endpoint has been completed, either by a short
1546 * packet or by the finish of a transfer.
1547 */
1548static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
1549				     int epnum, bool was_setup)
1550{
1551	u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
1552	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
1553	struct s3c_hsotg_req *hs_req = hs_ep->req;
1554	struct usb_request *req = &hs_req->req;
1555	unsigned size_left = DxEPTSIZ_XferSize_GET(epsize);
1556	int result = 0;
1557
1558	if (!hs_req) {
1559		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
1560		return;
1561	}
1562
1563	if (using_dma(hsotg)) {
1564		unsigned size_done;
1565
1566		/*
1567		 * Calculate the size of the transfer by checking how much
1568		 * is left in the endpoint size register and then working it
1569		 * out from the amount we loaded for the transfer.
1570		 *
1571		 * We need to do this as DMA pointers are always 32bit aligned
1572		 * so may overshoot/undershoot the transfer.
1573		 */
1574
1575		size_done = hs_ep->size_loaded - size_left;
1576		size_done += hs_ep->last_load;
1577
1578		req->actual = size_done;
1579	}
1580
1581	/* if there is more request to do, schedule new transfer */
1582	if (req->actual < req->length && size_left == 0) {
1583		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1584		return;
1585	} else if (epnum == 0) {
1586		/*
1587		 * After was_setup = 1 =>
1588		 * set CNAK for non Setup requests
1589		 */
1590		hsotg->setup = was_setup ? 0 : 1;
1591	}
1592
1593	if (req->actual < req->length && req->short_not_ok) {
1594		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
1595			__func__, req->actual, req->length);
1596
1597		/*
1598		 * todo - what should we return here? there's no one else
1599		 * even bothering to check the status.
1600		 */
1601	}
1602
1603	if (epnum == 0) {
1604		/*
1605		 * Condition req->complete != s3c_hsotg_complete_setup says:
1606		 * send ZLP when we have an asynchronous request from gadget
1607		 */
1608		if (!was_setup && req->complete != s3c_hsotg_complete_setup)
1609			s3c_hsotg_send_zlp(hsotg, hs_req);
1610	}
1611
1612	s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, result);
1613}
1614
1615/**
1616 * s3c_hsotg_read_frameno - read current frame number
1617 * @hsotg: The device instance
1618 *
1619 * Return the current frame number
1620 */
1621static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
1622{
1623	u32 dsts;
1624
1625	dsts = readl(hsotg->regs + DSTS);
1626	dsts &= DSTS_SOFFN_MASK;
1627	dsts >>= DSTS_SOFFN_SHIFT;
1628
1629	return dsts;
1630}
1631
1632/**
1633 * s3c_hsotg_handle_rx - RX FIFO has data
1634 * @hsotg: The device instance
1635 *
1636 * The IRQ handler has detected that the RX FIFO has some data in it
1637 * that requires processing, so find out what is in there and do the
1638 * appropriate read.
1639 *
1640 * The RXFIFO is a true FIFO, the packets coming out are still in packet
1641 * chunks, so if you have x packets received on an endpoint you'll get x
1642 * FIFO events delivered, each with a packet's worth of data in it.
1643 *
1644 * When using DMA, we should not be processing events from the RXFIFO
1645 * as the actual data should be sent to the memory directly and we turn
1646 * on the completion interrupts to get notifications of transfer completion.
1647 */
1648static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
1649{
1650	u32 grxstsr = readl(hsotg->regs + GRXSTSP);
1651	u32 epnum, status, size;
1652
1653	WARN_ON(using_dma(hsotg));
1654
1655	epnum = grxstsr & GRXSTS_EPNum_MASK;
1656	status = grxstsr & GRXSTS_PktSts_MASK;
1657
1658	size = grxstsr & GRXSTS_ByteCnt_MASK;
1659	size >>= GRXSTS_ByteCnt_SHIFT;
1660
1661	if (1)
1662		dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1663			__func__, grxstsr, size, epnum);
1664
1665#define __status(x) ((x) >> GRXSTS_PktSts_SHIFT)
1666
1667	switch (status >> GRXSTS_PktSts_SHIFT) {
1668	case __status(GRXSTS_PktSts_GlobalOutNAK):
1669		dev_dbg(hsotg->dev, "GlobalOutNAK\n");
1670		break;
1671
1672	case __status(GRXSTS_PktSts_OutDone):
1673		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
1674			s3c_hsotg_read_frameno(hsotg));
1675
1676		if (!using_dma(hsotg))
1677			s3c_hsotg_handle_outdone(hsotg, epnum, false);
1678		break;
1679
1680	case __status(GRXSTS_PktSts_SetupDone):
1681		dev_dbg(hsotg->dev,
1682			"SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1683			s3c_hsotg_read_frameno(hsotg),
1684			readl(hsotg->regs + DOEPCTL(0)));
1685
1686		s3c_hsotg_handle_outdone(hsotg, epnum, true);
1687		break;
1688
1689	case __status(GRXSTS_PktSts_OutRX):
1690		s3c_hsotg_rx_data(hsotg, epnum, size);
1691		break;
1692
1693	case __status(GRXSTS_PktSts_SetupRX):
1694		dev_dbg(hsotg->dev,
1695			"SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1696			s3c_hsotg_read_frameno(hsotg),
1697			readl(hsotg->regs + DOEPCTL(0)));
1698
1699		s3c_hsotg_rx_data(hsotg, epnum, size);
1700		break;
1701
1702	default:
1703		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
1704			 __func__, grxstsr);
1705
1706		s3c_hsotg_dump(hsotg);
1707		break;
1708	}
1709}
1710
1711/**
1712 * s3c_hsotg_ep0_mps - turn max packet size into register setting
1713 * @mps: The maximum packet size in bytes.
1714 */
1715static u32 s3c_hsotg_ep0_mps(unsigned int mps)
1716{
1717	switch (mps) {
1718	case 64:
1719		return D0EPCTL_MPS_64;
1720	case 32:
1721		return D0EPCTL_MPS_32;
1722	case 16:
1723		return D0EPCTL_MPS_16;
1724	case 8:
1725		return D0EPCTL_MPS_8;
1726	}
1727
1728	/* bad max packet size, warn and return invalid result */
1729	WARN_ON(1);
1730	return (u32)-1;
1731}
1732
1733/**
1734 * s3c_hsotg_set_ep_maxpacket - set endpoint's max-packet field
1735 * @hsotg: The driver state.
1736 * @ep: The index number of the endpoint
1737 * @mps: The maximum packet size in bytes
1738 *
1739 * Configure the maximum packet size for the given endpoint, updating
1740 * the hardware control registers to reflect this.
1741 */
1742static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
1743				       unsigned int ep, unsigned int mps)
1744{
1745	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
1746	void __iomem *regs = hsotg->regs;
1747	u32 mpsval;
1748	u32 reg;
1749
1750	if (ep == 0) {
1751		/* EP0 is a special case */
1752		mpsval = s3c_hsotg_ep0_mps(mps);
1753		if (mpsval > 3)
1754			goto bad_mps;
1755	} else {
1756		if (mps >= DxEPCTL_MPS_LIMIT+1)
1757			goto bad_mps;
1758
1759		mpsval = mps;
1760	}
1761
1762	hs_ep->ep.maxpacket = mps;
1763
1764	/*
1765	 * update both the in and out endpoint controldir_ registers, even
1766	 * if one of the directions may not be in use.
1767	 */
1768
1769	reg = readl(regs + DIEPCTL(ep));
1770	reg &= ~DxEPCTL_MPS_MASK;
1771	reg |= mpsval;
1772	writel(reg, regs + DIEPCTL(ep));
1773
1774	if (ep) {
1775		reg = readl(regs + DOEPCTL(ep));
1776		reg &= ~DxEPCTL_MPS_MASK;
1777		reg |= mpsval;
1778		writel(reg, regs + DOEPCTL(ep));
1779	}
1780
1781	return;
1782
1783bad_mps:
1784	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
1785}
1786
1787/**
1788 * s3c_hsotg_txfifo_flush - flush Tx FIFO
1789 * @hsotg: The driver state
1790 * @idx: The index for the endpoint (0..15)
1791 */
1792static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
1793{
1794	int timeout;
1795	int val;
1796
1797	writel(GRSTCTL_TxFNum(idx) | GRSTCTL_TxFFlsh,
1798		hsotg->regs + GRSTCTL);
1799
1800	/* wait until the fifo is flushed */
1801	timeout = 100;
1802
1803	while (1) {
1804		val = readl(hsotg->regs + GRSTCTL);
1805
1806		if ((val & (GRSTCTL_TxFFlsh)) == 0)
1807			break;
1808
1809		if (--timeout == 0) {
1810			dev_err(hsotg->dev,
1811				"%s: timeout flushing fifo (GRSTCTL=%08x)\n",
1812				__func__, val);
1813		}
1814
1815		udelay(1);
1816	}
1817}
1818
1819/**
1820 * s3c_hsotg_trytx - check to see if anything needs transmitting
1821 * @hsotg: The driver state
1822 * @hs_ep: The driver endpoint to check.
1823 *
1824 * Check to see if there is a request that has data to send, and if so
1825 * make an attempt to write data into the FIFO.
1826 */
1827static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
1828			   struct s3c_hsotg_ep *hs_ep)
1829{
1830	struct s3c_hsotg_req *hs_req = hs_ep->req;
1831
1832	if (!hs_ep->dir_in || !hs_req)
1833		return 0;
1834
1835	if (hs_req->req.actual < hs_req->req.length) {
1836		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
1837			hs_ep->index);
1838		return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1839	}
1840
1841	return 0;
1842}
1843
1844/**
1845 * s3c_hsotg_complete_in - complete IN transfer
1846 * @hsotg: The device state.
1847 * @hs_ep: The endpoint that has just completed.
1848 *
1849 * An IN transfer has been completed, update the transfer's state and then
1850 * call the relevant completion routines.
1851 */
1852static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
1853				  struct s3c_hsotg_ep *hs_ep)
1854{
1855	struct s3c_hsotg_req *hs_req = hs_ep->req;
1856	u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
1857	int size_left, size_done;
1858
1859	if (!hs_req) {
1860		dev_dbg(hsotg->dev, "XferCompl but no req\n");
1861		return;
1862	}
1863
1864	/* Finish ZLP handling for IN EP0 transactions */
1865	if (hsotg->eps[0].sent_zlp) {
1866		dev_dbg(hsotg->dev, "zlp packet received\n");
1867		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
1868		return;
1869	}
1870
1871	/*
1872	 * Calculate the size of the transfer by checking how much is left
1873	 * in the endpoint size register and then working it out from
1874	 * the amount we loaded for the transfer.
1875	 *
1876	 * We do this even for DMA, as the transfer may have incremented
1877	 * past the end of the buffer (DMA transfers are always 32bit
1878	 * aligned).
1879	 */
1880
1881	size_left = DxEPTSIZ_XferSize_GET(epsize);
1882
1883	size_done = hs_ep->size_loaded - size_left;
1884	size_done += hs_ep->last_load;
1885
1886	if (hs_req->req.actual != size_done)
1887		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
1888			__func__, hs_req->req.actual, size_done);
1889
1890	hs_req->req.actual = size_done;
1891	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
1892		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
1893
1894	/*
1895	 * Check if dealing with Maximum Packet Size(MPS) IN transfer at EP0
1896	 * When sent data is a multiple MPS size (e.g. 64B ,128B ,192B
1897	 * ,256B ... ), after last MPS sized packet send IN ZLP packet to
1898	 * inform the host that no more data is available.
1899	 * The state of req.zero member is checked to be sure that the value to
1900	 * send is smaller than wValue expected from host.
1901	 * Check req.length to NOT send another ZLP when the current one is
1902	 * under completion (the one for which this completion has been called).
1903	 */
1904	if (hs_req->req.length && hs_ep->index == 0 && hs_req->req.zero &&
1905	    hs_req->req.length == hs_req->req.actual &&
1906	    !(hs_req->req.length % hs_ep->ep.maxpacket)) {
1907
1908		dev_dbg(hsotg->dev, "ep0 zlp IN packet sent\n");
1909		s3c_hsotg_send_zlp(hsotg, hs_req);
1910
1911		return;
1912	}
1913
1914	if (!size_left && hs_req->req.actual < hs_req->req.length) {
1915		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
1916		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1917	} else
1918		s3c_hsotg_complete_request_lock(hsotg, hs_ep, hs_req, 0);
1919}
1920
1921/**
1922 * s3c_hsotg_epint - handle an in/out endpoint interrupt
1923 * @hsotg: The driver state
1924 * @idx: The index for the endpoint (0..15)
1925 * @dir_in: Set if this is an IN endpoint
1926 *
1927 * Process and clear any interrupt pending for an individual endpoint
1928 */
1929static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
1930			    int dir_in)
1931{
1932	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
1933	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
1934	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
1935	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
1936	u32 ints;
1937
1938	ints = readl(hsotg->regs + epint_reg);
1939
1940	/* Clear endpoint interrupts */
1941	writel(ints, hsotg->regs + epint_reg);
1942
1943	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
1944		__func__, idx, dir_in ? "in" : "out", ints);
1945
1946	if (ints & DxEPINT_XferCompl) {
1947		dev_dbg(hsotg->dev,
1948			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
1949			__func__, readl(hsotg->regs + epctl_reg),
1950			readl(hsotg->regs + epsiz_reg));
1951
1952		/*
1953		 * we get OutDone from the FIFO, so we only need to look
1954		 * at completing IN requests here
1955		 */
1956		if (dir_in) {
1957			s3c_hsotg_complete_in(hsotg, hs_ep);
1958
1959			if (idx == 0 && !hs_ep->req)
1960				s3c_hsotg_enqueue_setup(hsotg);
1961		} else if (using_dma(hsotg)) {
1962			/*
1963			 * We're using DMA, we need to fire an OutDone here
1964			 * as we ignore the RXFIFO.
1965			 */
1966
1967			s3c_hsotg_handle_outdone(hsotg, idx, false);
1968		}
1969	}
1970
1971	if (ints & DxEPINT_EPDisbld) {
1972		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
1973
1974		if (dir_in) {
1975			int epctl = readl(hsotg->regs + epctl_reg);
1976
1977			s3c_hsotg_txfifo_flush(hsotg, idx);
1978
1979			if ((epctl & DxEPCTL_Stall) &&
1980				(epctl & DxEPCTL_EPType_Bulk)) {
1981				int dctl = readl(hsotg->regs + DCTL);
1982
1983				dctl |= DCTL_CGNPInNAK;
1984				writel(dctl, hsotg->regs + DCTL);
1985			}
1986		}
1987	}
1988
1989	if (ints & DxEPINT_AHBErr)
1990		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
1991
1992	if (ints & DxEPINT_Setup) {  /* Setup or Timeout */
1993		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
1994
1995		if (using_dma(hsotg) && idx == 0) {
1996			/*
1997			 * this is the notification we've received a
1998			 * setup packet. In non-DMA mode we'd get this
1999			 * from the RXFIFO, instead we need to process
2000			 * the setup here.
2001			 */
2002
2003			if (dir_in)
2004				WARN_ON_ONCE(1);
2005			else
2006				s3c_hsotg_handle_outdone(hsotg, 0, true);
2007		}
2008	}
2009
2010	if (ints & DxEPINT_Back2BackSetup)
2011		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
2012
2013	if (dir_in) {
2014		/* not sure if this is important, but we'll clear it anyway */
2015		if (ints & DIEPMSK_INTknTXFEmpMsk) {
2016			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
2017				__func__, idx);
2018		}
2019
2020		/* this probably means something bad is happening */
2021		if (ints & DIEPMSK_INTknEPMisMsk) {
2022			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
2023				 __func__, idx);
2024		}
2025
2026		/* FIFO has space or is empty (see GAHBCFG) */
2027		if (hsotg->dedicated_fifos &&
2028		    ints & DIEPMSK_TxFIFOEmpty) {
2029			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
2030				__func__, idx);
2031			if (!using_dma(hsotg))
2032				s3c_hsotg_trytx(hsotg, hs_ep);
2033		}
2034	}
2035}
2036
2037/**
2038 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
2039 * @hsotg: The device state.
2040 *
2041 * Handle updating the device settings after the enumeration phase has
2042 * been completed.
2043 */
2044static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
2045{
2046	u32 dsts = readl(hsotg->regs + DSTS);
2047	int ep0_mps = 0, ep_mps;
2048
2049	/*
2050	 * This should signal the finish of the enumeration phase
2051	 * of the USB handshaking, so we should now know what rate
2052	 * we connected at.
2053	 */
2054
2055	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
2056
2057	/*
2058	 * note, since we're limited by the size of transfer on EP0, and
2059	 * it seems IN transfers must be a even number of packets we do
2060	 * not advertise a 64byte MPS on EP0.
2061	 */
2062
2063	/* catch both EnumSpd_FS and EnumSpd_FS48 */
2064	switch (dsts & DSTS_EnumSpd_MASK) {
2065	case DSTS_EnumSpd_FS:
2066	case DSTS_EnumSpd_FS48:
2067		hsotg->gadget.speed = USB_SPEED_FULL;
2068		ep0_mps = EP0_MPS_LIMIT;
2069		ep_mps = 64;
2070		break;
2071
2072	case DSTS_EnumSpd_HS:
2073		hsotg->gadget.speed = USB_SPEED_HIGH;
2074		ep0_mps = EP0_MPS_LIMIT;
2075		ep_mps = 512;
2076		break;
2077
2078	case DSTS_EnumSpd_LS:
2079		hsotg->gadget.speed = USB_SPEED_LOW;
2080		/*
2081		 * note, we don't actually support LS in this driver at the
2082		 * moment, and the documentation seems to imply that it isn't
2083		 * supported by the PHYs on some of the devices.
2084		 */
2085		break;
2086	}
2087	dev_info(hsotg->dev, "new device is %s\n",
2088		 usb_speed_string(hsotg->gadget.speed));
2089
2090	/*
2091	 * we should now know the maximum packet size for an
2092	 * endpoint, so set the endpoints to a default value.
2093	 */
2094
2095	if (ep0_mps) {
2096		int i;
2097		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
2098		for (i = 1; i < hsotg->num_of_eps; i++)
2099			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
2100	}
2101
2102	/* ensure after enumeration our EP0 is active */
2103
2104	s3c_hsotg_enqueue_setup(hsotg);
2105
2106	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2107		readl(hsotg->regs + DIEPCTL0),
2108		readl(hsotg->regs + DOEPCTL0));
2109}
2110
2111/**
2112 * kill_all_requests - remove all requests from the endpoint's queue
2113 * @hsotg: The device state.
2114 * @ep: The endpoint the requests may be on.
2115 * @result: The result code to use.
2116 * @force: Force removal of any current requests
2117 *
2118 * Go through the requests on the given endpoint and mark them
2119 * completed with the given result code.
2120 */
2121static void kill_all_requests(struct s3c_hsotg *hsotg,
2122			      struct s3c_hsotg_ep *ep,
2123			      int result, bool force)
2124{
2125	struct s3c_hsotg_req *req, *treq;
2126	unsigned long flags;
2127
2128	spin_lock_irqsave(&ep->lock, flags);
2129
2130	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2131		/*
2132		 * currently, we can't do much about an already
2133		 * running request on an in endpoint
2134		 */
2135
2136		if (ep->req == req && ep->dir_in && !force)
2137			continue;
2138
2139		s3c_hsotg_complete_request(hsotg, ep, req,
2140					   result);
2141	}
2142
2143	spin_unlock_irqrestore(&ep->lock, flags);
2144}
2145
/*
 * call_gadget - invoke a gadget driver callback when it is safe to do so:
 * a driver must be bound, the callback present, and the link speed known
 * (i.e. we have enumerated). Wrapped in do { } while (0) so the macro
 * expands as a single statement and is safe in un-braced if/else bodies.
 */
#define call_gadget(_hs, _entry) \
	do { \
		if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN &&	\
		    (_hs)->driver && (_hs)->driver->_entry)	\
			(_hs)->driver->_entry(&(_hs)->gadget); \
	} while (0)
2150
2151/**
2152 * s3c_hsotg_disconnect - disconnect service
2153 * @hsotg: The device state.
2154 *
2155 * The device has been disconnected. Remove all current
2156 * transactions and signal the gadget driver that this
2157 * has happened.
2158 */
2159static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg)
2160{
2161	unsigned ep;
2162
2163	for (ep = 0; ep < hsotg->num_of_eps; ep++)
2164		kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
2165
2166	call_gadget(hsotg, disconnect);
2167}
2168
2169/**
2170 * s3c_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
2171 * @hsotg: The device state:
2172 * @periodic: True if this is a periodic FIFO interrupt
2173 */
2174static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
2175{
2176	struct s3c_hsotg_ep *ep;
2177	int epno, ret;
2178
2179	/* look through for any more data to transmit */
2180
2181	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
2182		ep = &hsotg->eps[epno];
2183
2184		if (!ep->dir_in)
2185			continue;
2186
2187		if ((periodic && !ep->periodic) ||
2188		    (!periodic && ep->periodic))
2189			continue;
2190
2191		ret = s3c_hsotg_trytx(hsotg, ep);
2192		if (ret < 0)
2193			break;
2194	}
2195}
2196
2197/* IRQ flags which will trigger a retry around the IRQ loop */
2198#define IRQ_RETRY_MASK (GINTSTS_NPTxFEmp | \
2199			GINTSTS_PTxFEmp |  \
2200			GINTSTS_RxFLvl)
2201
2202/**
2203 * s3c_hsotg_corereset - issue softreset to the core
2204 * @hsotg: The device state
2205 *
2206 * Issue a soft reset to the core, and await the core finishing it.
2207 */
2208static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
2209{
2210	int timeout;
2211	u32 grstctl;
2212
2213	dev_dbg(hsotg->dev, "resetting core\n");
2214
2215	/* issue soft reset */
2216	writel(GRSTCTL_CSftRst, hsotg->regs + GRSTCTL);
2217
2218	timeout = 1000;
2219	do {
2220		grstctl = readl(hsotg->regs + GRSTCTL);
2221	} while ((grstctl & GRSTCTL_CSftRst) && timeout-- > 0);
2222
2223	if (grstctl & GRSTCTL_CSftRst) {
2224		dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
2225		return -EINVAL;
2226	}
2227
2228	timeout = 1000;
2229
2230	while (1) {
2231		u32 grstctl = readl(hsotg->regs + GRSTCTL);
2232
2233		if (timeout-- < 0) {
2234			dev_info(hsotg->dev,
2235				 "%s: reset failed, GRSTCTL=%08x\n",
2236				 __func__, grstctl);
2237			return -ETIMEDOUT;
2238		}
2239
2240		if (!(grstctl & GRSTCTL_AHBIdle))
2241			continue;
2242
2243		break;		/* reset done */
2244	}
2245
2246	dev_dbg(hsotg->dev, "reset successful\n");
2247	return 0;
2248}
2249
2250/**
2251 * s3c_hsotg_core_init - issue softreset to the core
2252 * @hsotg: The device state
2253 *
2254 * Issue a soft reset to the core, and await the core finishing it.
2255 */
2256static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
2257{
2258	s3c_hsotg_corereset(hsotg);
2259
2260	/*
2261	 * we must now enable ep0 ready for host detection and then
2262	 * set configuration.
2263	 */
2264
2265	/* set the PLL on, remove the HNP/SRP and set the PHY */
2266	writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) |
2267	       (0x5 << 10), hsotg->regs + GUSBCFG);
2268
2269	s3c_hsotg_init_fifo(hsotg);
2270
2271	__orr32(hsotg->regs + DCTL, DCTL_SftDiscon);
2272
2273	writel(1 << 18 | DCFG_DevSpd_HS,  hsotg->regs + DCFG);
2274
2275	/* Clear any pending OTG interrupts */
2276	writel(0xffffffff, hsotg->regs + GOTGINT);
2277
2278	/* Clear any pending interrupts */
2279	writel(0xffffffff, hsotg->regs + GINTSTS);
2280
2281	writel(GINTSTS_ErlySusp | GINTSTS_SessReqInt |
2282	       GINTSTS_GOUTNakEff | GINTSTS_GINNakEff |
2283	       GINTSTS_ConIDStsChng | GINTSTS_USBRst |
2284	       GINTSTS_EnumDone | GINTSTS_OTGInt |
2285	       GINTSTS_USBSusp | GINTSTS_WkUpInt,
2286	       hsotg->regs + GINTMSK);
2287
2288	if (using_dma(hsotg))
2289		writel(GAHBCFG_GlblIntrEn | GAHBCFG_DMAEn |
2290		       GAHBCFG_HBstLen_Incr4,
2291		       hsotg->regs + GAHBCFG);
2292	else
2293		writel(GAHBCFG_GlblIntrEn, hsotg->regs + GAHBCFG);
2294
2295	/*
2296	 * Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
2297	 * up being flooded with interrupts if the host is polling the
2298	 * endpoint to try and read data.
2299	 */
2300
2301	writel(((hsotg->dedicated_fifos) ? DIEPMSK_TxFIFOEmpty : 0) |
2302	       DIEPMSK_EPDisbldMsk | DIEPMSK_XferComplMsk |
2303	       DIEPMSK_TimeOUTMsk | DIEPMSK_AHBErrMsk |
2304	       DIEPMSK_INTknEPMisMsk,
2305	       hsotg->regs + DIEPMSK);
2306
2307	/*
2308	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
2309	 * DMA mode we may need this.
2310	 */
2311	writel((using_dma(hsotg) ? (DIEPMSK_XferComplMsk |
2312				    DIEPMSK_TimeOUTMsk) : 0) |
2313	       DOEPMSK_EPDisbldMsk | DOEPMSK_AHBErrMsk |
2314	       DOEPMSK_SetupMsk,
2315	       hsotg->regs + DOEPMSK);
2316
2317	writel(0, hsotg->regs + DAINTMSK);
2318
2319	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2320		readl(hsotg->regs + DIEPCTL0),
2321		readl(hsotg->regs + DOEPCTL0));
2322
2323	/* enable in and out endpoint interrupts */
2324	s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPInt | GINTSTS_IEPInt);
2325
2326	/*
2327	 * Enable the RXFIFO when in slave mode, as this is how we collect
2328	 * the data. In DMA mode, we get events from the FIFO but also
2329	 * things we cannot process, so do not use it.
2330	 */
2331	if (!using_dma(hsotg))
2332		s3c_hsotg_en_gsint(hsotg, GINTSTS_RxFLvl);
2333
2334	/* Enable interrupts for EP0 in and out */
2335	s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
2336	s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);
2337
2338	__orr32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);
2339	udelay(10);  /* see openiboot */
2340	__bic32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);
2341
2342	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));
2343
2344	/*
2345	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
2346	 * writing to the EPCTL register..
2347	 */
2348
2349	/* set to read 1 8byte packet */
2350	writel(DxEPTSIZ_MC(1) | DxEPTSIZ_PktCnt(1) |
2351	       DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);
2352
2353	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
2354	       DxEPCTL_CNAK | DxEPCTL_EPEna |
2355	       DxEPCTL_USBActEp,
2356	       hsotg->regs + DOEPCTL0);
2357
2358	/* enable, but don't activate EP0in */
2359	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
2360	       DxEPCTL_USBActEp, hsotg->regs + DIEPCTL0);
2361
2362	s3c_hsotg_enqueue_setup(hsotg);
2363
2364	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2365		readl(hsotg->regs + DIEPCTL0),
2366		readl(hsotg->regs + DOEPCTL0));
2367
2368	/* clear global NAKs */
2369	writel(DCTL_CGOUTNak | DCTL_CGNPInNAK,
2370	       hsotg->regs + DCTL);
2371
2372	/* must be at-least 3ms to allow bus to see disconnect */
2373	mdelay(3);
2374
2375	/* remove the soft-disconnect and let's go */
2376	__bic32(hsotg->regs + DCTL, DCTL_SftDiscon);
2377}
2378
2379/**
2380 * s3c_hsotg_irq - handle device interrupt
2381 * @irq: The IRQ number triggered
2382 * @pw: The pw value when registered the handler.
2383 */
2384static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
2385{
2386	struct s3c_hsotg *hsotg = pw;
2387	int retry_count = 8;
2388	u32 gintsts;
2389	u32 gintmsk;
2390
2391irq_retry:
2392	gintsts = readl(hsotg->regs + GINTSTS);
2393	gintmsk = readl(hsotg->regs + GINTMSK);
2394
2395	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
2396		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
2397
2398	gintsts &= gintmsk;
2399
2400	if (gintsts & GINTSTS_OTGInt) {
2401		u32 otgint = readl(hsotg->regs + GOTGINT);
2402
2403		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);
2404
2405		writel(otgint, hsotg->regs + GOTGINT);
2406	}
2407
2408	if (gintsts & GINTSTS_SessReqInt) {
2409		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
2410		writel(GINTSTS_SessReqInt, hsotg->regs + GINTSTS);
2411	}
2412
2413	if (gintsts & GINTSTS_EnumDone) {
2414		writel(GINTSTS_EnumDone, hsotg->regs + GINTSTS);
2415
2416		s3c_hsotg_irq_enumdone(hsotg);
2417	}
2418
2419	if (gintsts & GINTSTS_ConIDStsChng) {
2420		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
2421			readl(hsotg->regs + DSTS),
2422			readl(hsotg->regs + GOTGCTL));
2423
2424		writel(GINTSTS_ConIDStsChng, hsotg->regs + GINTSTS);
2425	}
2426
2427	if (gintsts & (GINTSTS_OEPInt | GINTSTS_IEPInt)) {
2428		u32 daint = readl(hsotg->regs + DAINT);
2429		u32 daint_out = daint >> DAINT_OutEP_SHIFT;
2430		u32 daint_in = daint & ~(daint_out << DAINT_OutEP_SHIFT);
2431		int ep;
2432
2433		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
2434
2435		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
2436			if (daint_out & 1)
2437				s3c_hsotg_epint(hsotg, ep, 0);
2438		}
2439
2440		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
2441			if (daint_in & 1)
2442				s3c_hsotg_epint(hsotg, ep, 1);
2443		}
2444	}
2445
2446	if (gintsts & GINTSTS_USBRst) {
2447
2448		u32 usb_status = readl(hsotg->regs + GOTGCTL);
2449
2450		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
2451		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
2452			readl(hsotg->regs + GNPTXSTS));
2453
2454		writel(GINTSTS_USBRst, hsotg->regs + GINTSTS);
2455
2456		if (usb_status & GOTGCTL_BSESVLD) {
2457			if (time_after(jiffies, hsotg->last_rst +
2458				       msecs_to_jiffies(200))) {
2459
2460				kill_all_requests(hsotg, &hsotg->eps[0],
2461							  -ECONNRESET, true);
2462
2463				s3c_hsotg_core_init(hsotg);
2464				hsotg->last_rst = jiffies;
2465			}
2466		}
2467	}
2468
2469	/* check both FIFOs */
2470
2471	if (gintsts & GINTSTS_NPTxFEmp) {
2472		dev_dbg(hsotg->dev, "NPTxFEmp\n");
2473
2474		/*
2475		 * Disable the interrupt to stop it happening again
2476		 * unless one of these endpoint routines decides that
2477		 * it needs re-enabling
2478		 */
2479
2480		s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTxFEmp);
2481		s3c_hsotg_irq_fifoempty(hsotg, false);
2482	}
2483
2484	if (gintsts & GINTSTS_PTxFEmp) {
2485		dev_dbg(hsotg->dev, "PTxFEmp\n");
2486
2487		/* See note in GINTSTS_NPTxFEmp */
2488
2489		s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTxFEmp);
2490		s3c_hsotg_irq_fifoempty(hsotg, true);
2491	}
2492
2493	if (gintsts & GINTSTS_RxFLvl) {
2494		/*
2495		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
2496		 * we need to retry s3c_hsotg_handle_rx if this is still
2497		 * set.
2498		 */
2499
2500		s3c_hsotg_handle_rx(hsotg);
2501	}
2502
2503	if (gintsts & GINTSTS_ModeMis) {
2504		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
2505		writel(GINTSTS_ModeMis, hsotg->regs + GINTSTS);
2506	}
2507
2508	if (gintsts & GINTSTS_USBSusp) {
2509		dev_info(hsotg->dev, "GINTSTS_USBSusp\n");
2510		writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS);
2511
2512		call_gadget(hsotg, suspend);
2513		s3c_hsotg_disconnect(hsotg);
2514	}
2515
2516	if (gintsts & GINTSTS_WkUpInt) {
2517		dev_info(hsotg->dev, "GINTSTS_WkUpIn\n");
2518		writel(GINTSTS_WkUpInt, hsotg->regs + GINTSTS);
2519
2520		call_gadget(hsotg, resume);
2521	}
2522
2523	if (gintsts & GINTSTS_ErlySusp) {
2524		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
2525		writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);
2526
2527		s3c_hsotg_disconnect(hsotg);
2528	}
2529
2530	/*
2531	 * these next two seem to crop-up occasionally causing the core
2532	 * to shutdown the USB transfer, so try clearing them and logging
2533	 * the occurrence.
2534	 */
2535
2536	if (gintsts & GINTSTS_GOUTNakEff) {
2537		dev_info(hsotg->dev, "GOUTNakEff triggered\n");
2538
2539		writel(DCTL_CGOUTNak, hsotg->regs + DCTL);
2540
2541		s3c_hsotg_dump(hsotg);
2542	}
2543
2544	if (gintsts & GINTSTS_GINNakEff) {
2545		dev_info(hsotg->dev, "GINNakEff triggered\n");
2546
2547		writel(DCTL_CGNPInNAK, hsotg->regs + DCTL);
2548
2549		s3c_hsotg_dump(hsotg);
2550	}
2551
2552	/*
2553	 * if we've had fifo events, we should try and go around the
2554	 * loop again to see if there's any point in returning yet.
2555	 */
2556
2557	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
2558			goto irq_retry;
2559
2560	return IRQ_HANDLED;
2561}
2562
2563/**
2564 * s3c_hsotg_ep_enable - enable the given endpoint
2565 * @ep: The USB endpint to configure
2566 * @desc: The USB endpoint descriptor to configure with.
2567 *
2568 * This is called from the USB gadget code's usb_ep_enable().
2569 */
2570static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2571			       const struct usb_endpoint_descriptor *desc)
2572{
2573	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2574	struct s3c_hsotg *hsotg = hs_ep->parent;
2575	unsigned long flags;
2576	int index = hs_ep->index;
2577	u32 epctrl_reg;
2578	u32 epctrl;
2579	u32 mps;
2580	int dir_in;
2581	int ret = 0;
2582
2583	dev_dbg(hsotg->dev,
2584		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
2585		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
2586		desc->wMaxPacketSize, desc->bInterval);
2587
2588	/* not to be called for EP0 */
2589	WARN_ON(index == 0);
2590
2591	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
2592	if (dir_in != hs_ep->dir_in) {
2593		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
2594		return -EINVAL;
2595	}
2596
2597	mps = usb_endpoint_maxp(desc);
2598
2599	/* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
2600
2601	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
2602	epctrl = readl(hsotg->regs + epctrl_reg);
2603
2604	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
2605		__func__, epctrl, epctrl_reg);
2606
2607	spin_lock_irqsave(&hs_ep->lock, flags);
2608
2609	epctrl &= ~(DxEPCTL_EPType_MASK | DxEPCTL_MPS_MASK);
2610	epctrl |= DxEPCTL_MPS(mps);
2611
2612	/*
2613	 * mark the endpoint as active, otherwise the core may ignore
2614	 * transactions entirely for this endpoint
2615	 */
2616	epctrl |= DxEPCTL_USBActEp;
2617
2618	/*
2619	 * set the NAK status on the endpoint, otherwise we might try and
2620	 * do something with data that we've yet got a request to process
2621	 * since the RXFIFO will take data for an endpoint even if the
2622	 * size register hasn't been set.
2623	 */
2624
2625	epctrl |= DxEPCTL_SNAK;
2626
2627	/* update the endpoint state */
2628	hs_ep->ep.maxpacket = mps;
2629
2630	/* default, set to non-periodic */
2631	hs_ep->periodic = 0;
2632
2633	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
2634	case USB_ENDPOINT_XFER_ISOC:
2635		dev_err(hsotg->dev, "no current ISOC support\n");
2636		ret = -EINVAL;
2637		goto out;
2638
2639	case USB_ENDPOINT_XFER_BULK:
2640		epctrl |= DxEPCTL_EPType_Bulk;
2641		break;
2642
2643	case USB_ENDPOINT_XFER_INT:
2644		if (dir_in) {
2645			/*
2646			 * Allocate our TxFNum by simply using the index
2647			 * of the endpoint for the moment. We could do
2648			 * something better if the host indicates how
2649			 * many FIFOs we are expecting to use.
2650			 */
2651
2652			hs_ep->periodic = 1;
2653			epctrl |= DxEPCTL_TxFNum(index);
2654		}
2655
2656		epctrl |= DxEPCTL_EPType_Intterupt;
2657		break;
2658
2659	case USB_ENDPOINT_XFER_CONTROL:
2660		epctrl |= DxEPCTL_EPType_Control;
2661		break;
2662	}
2663
2664	/*
2665	 * if the hardware has dedicated fifos, we must give each IN EP
2666	 * a unique tx-fifo even if it is non-periodic.
2667	 */
2668	if (dir_in && hsotg->dedicated_fifos)
2669		epctrl |= DxEPCTL_TxFNum(index);
2670
2671	/* for non control endpoints, set PID to D0 */
2672	if (index)
2673		epctrl |= DxEPCTL_SetD0PID;
2674
2675	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
2676		__func__, epctrl);
2677
2678	writel(epctrl, hsotg->regs + epctrl_reg);
2679	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
2680		__func__, readl(hsotg->regs + epctrl_reg));
2681
2682	/* enable the endpoint interrupt */
2683	s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2684
2685out:
2686	spin_unlock_irqrestore(&hs_ep->lock, flags);
2687	return ret;
2688}
2689
2690/**
2691 * s3c_hsotg_ep_disable - disable given endpoint
2692 * @ep: The endpoint to disable.
2693 */
2694static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2695{
2696	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2697	struct s3c_hsotg *hsotg = hs_ep->parent;
2698	int dir_in = hs_ep->dir_in;
2699	int index = hs_ep->index;
2700	unsigned long flags;
2701	u32 epctrl_reg;
2702	u32 ctrl;
2703
2704	dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
2705
2706	if (ep == &hsotg->eps[0].ep) {
2707		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
2708		return -EINVAL;
2709	}
2710
2711	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
2712
2713	/* terminate all requests with shutdown */
2714	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
2715
2716	spin_lock_irqsave(&hs_ep->lock, flags);
2717
2718	ctrl = readl(hsotg->regs + epctrl_reg);
2719	ctrl &= ~DxEPCTL_EPEna;
2720	ctrl &= ~DxEPCTL_USBActEp;
2721	ctrl |= DxEPCTL_SNAK;
2722
2723	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
2724	writel(ctrl, hsotg->regs + epctrl_reg);
2725
2726	/* disable endpoint interrupts */
2727	s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
2728
2729	spin_unlock_irqrestore(&hs_ep->lock, flags);
2730	return 0;
2731}
2732
2733/**
2734 * on_list - check request is on the given endpoint
2735 * @ep: The endpoint to check.
2736 * @test: The request to test if it is on the endpoint.
2737 */
2738static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
2739{
2740	struct s3c_hsotg_req *req, *treq;
2741
2742	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2743		if (req == test)
2744			return true;
2745	}
2746
2747	return false;
2748}
2749
2750/**
2751 * s3c_hsotg_ep_dequeue - dequeue given endpoint
2752 * @ep: The endpoint to dequeue.
2753 * @req: The request to be removed from a queue.
2754 */
2755static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2756{
2757	struct s3c_hsotg_req *hs_req = our_req(req);
2758	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2759	struct s3c_hsotg *hs = hs_ep->parent;
2760	unsigned long flags;
2761
2762	dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
2763
2764	spin_lock_irqsave(&hs_ep->lock, flags);
2765
2766	if (!on_list(hs_ep, hs_req)) {
2767		spin_unlock_irqrestore(&hs_ep->lock, flags);
2768		return -EINVAL;
2769	}
2770
2771	s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
2772	spin_unlock_irqrestore(&hs_ep->lock, flags);
2773
2774	return 0;
2775}
2776
2777/**
2778 * s3c_hsotg_ep_sethalt - set halt on a given endpoint
2779 * @ep: The endpoint to set halt.
2780 * @value: Set or unset the halt.
2781 */
2782static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2783{
2784	struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2785	struct s3c_hsotg *hs = hs_ep->parent;
2786	int index = hs_ep->index;
2787	unsigned long irqflags;
2788	u32 epreg;
2789	u32 epctl;
2790	u32 xfertype;
2791
2792	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
2793
2794	spin_lock_irqsave(&hs_ep->lock, irqflags);
2795
2796	/* write both IN and OUT control registers */
2797
2798	epreg = DIEPCTL(index);
2799	epctl = readl(hs->regs + epreg);
2800
2801	if (value) {
2802		epctl |= DxEPCTL_Stall + DxEPCTL_SNAK;
2803		if (epctl & DxEPCTL_EPEna)
2804			epctl |= DxEPCTL_EPDis;
2805	} else {
2806		epctl &= ~DxEPCTL_Stall;
2807		xfertype = epctl & DxEPCTL_EPType_MASK;
2808		if (xfertype == DxEPCTL_EPType_Bulk ||
2809			xfertype == DxEPCTL_EPType_Intterupt)
2810				epctl |= DxEPCTL_SetD0PID;
2811	}
2812
2813	writel(epctl, hs->regs + epreg);
2814
2815	epreg = DOEPCTL(index);
2816	epctl = readl(hs->regs + epreg);
2817
2818	if (value)
2819		epctl |= DxEPCTL_Stall;
2820	else {
2821		epctl &= ~DxEPCTL_Stall;
2822		xfertype = epctl & DxEPCTL_EPType_MASK;
2823		if (xfertype == DxEPCTL_EPType_Bulk ||
2824			xfertype == DxEPCTL_EPType_Intterupt)
2825				epctl |= DxEPCTL_SetD0PID;
2826	}
2827
2828	writel(epctl, hs->regs + epreg);
2829
2830	spin_unlock_irqrestore(&hs_ep->lock, irqflags);
2831
2832	return 0;
2833}
2834
/*
 * Endpoint operations handed to the gadget core for every endpoint
 * created in s3c_hsotg_initep().
 */
static struct usb_ep_ops s3c_hsotg_ep_ops = {
	.enable		= s3c_hsotg_ep_enable,
	.disable	= s3c_hsotg_ep_disable,
	.alloc_request	= s3c_hsotg_ep_alloc_request,
	.free_request	= s3c_hsotg_ep_free_request,
	.queue		= s3c_hsotg_ep_queue,
	.dequeue	= s3c_hsotg_ep_dequeue,
	.set_halt	= s3c_hsotg_ep_sethalt,
	/* note, don't believe we have any call for the fifo routines */
};
2845
2846/**
2847 * s3c_hsotg_phy_enable - enable platform phy dev
2848 * @hsotg: The driver state
2849 *
2850 * A wrapper for platform code responsible for controlling
2851 * low-level USB code
2852 */
2853static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
2854{
2855	struct platform_device *pdev = to_platform_device(hsotg->dev);
2856
2857	dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
2858	if (hsotg->plat->phy_init)
2859		hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
2860}
2861
2862/**
2863 * s3c_hsotg_phy_disable - disable platform phy dev
2864 * @hsotg: The driver state
2865 *
2866 * A wrapper for platform code responsible for controlling
2867 * low-level USB code
2868 */
2869static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
2870{
2871	struct platform_device *pdev = to_platform_device(hsotg->dev);
2872
2873	if (hsotg->plat->phy_exit)
2874		hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
2875}
2876
2877/**
2878 * s3c_hsotg_init - initalize the usb core
2879 * @hsotg: The driver state
2880 */
2881static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
2882{
2883	/* unmask subset of endpoint interrupts */
2884
2885	writel(DIEPMSK_TimeOUTMsk | DIEPMSK_AHBErrMsk |
2886	       DIEPMSK_EPDisbldMsk | DIEPMSK_XferComplMsk,
2887	       hsotg->regs + DIEPMSK);
2888
2889	writel(DOEPMSK_SetupMsk | DOEPMSK_AHBErrMsk |
2890	       DOEPMSK_EPDisbldMsk | DOEPMSK_XferComplMsk,
2891	       hsotg->regs + DOEPMSK);
2892
2893	writel(0, hsotg->regs + DAINTMSK);
2894
2895	/* Be in disconnected state until gadget is registered */
2896	__orr32(hsotg->regs + DCTL, DCTL_SftDiscon);
2897
2898	if (0) {
2899		/* post global nak until we're ready */
2900		writel(DCTL_SGNPInNAK | DCTL_SGOUTNak,
2901		       hsotg->regs + DCTL);
2902	}
2903
2904	/* setup fifos */
2905
2906	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
2907		readl(hsotg->regs + GRXFSIZ),
2908		readl(hsotg->regs + GNPTXFSIZ));
2909
2910	s3c_hsotg_init_fifo(hsotg);
2911
2912	/* set the PLL on, remove the HNP/SRP and set the PHY */
2913	writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) | (0x5 << 10),
2914	       hsotg->regs + GUSBCFG);
2915
2916	writel(using_dma(hsotg) ? GAHBCFG_DMAEn : 0x0,
2917	       hsotg->regs + GAHBCFG);
2918}
2919
2920/**
2921 * s3c_hsotg_udc_start - prepare the udc for work
2922 * @gadget: The usb gadget state
2923 * @driver: The usb gadget driver
2924 *
2925 * Perform initialization to prepare udc device and driver
2926 * to work.
2927 */
2928static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
2929			   struct usb_gadget_driver *driver)
2930{
2931	struct s3c_hsotg *hsotg = to_hsotg(gadget);
2932	int ret;
2933
2934	if (!hsotg) {
2935		printk(KERN_ERR "%s: called with no device\n", __func__);
2936		return -ENODEV;
2937	}
2938
2939	if (!driver) {
2940		dev_err(hsotg->dev, "%s: no driver\n", __func__);
2941		return -EINVAL;
2942	}
2943
2944	if (driver->max_speed < USB_SPEED_FULL)
2945		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
2946
2947	if (!driver->setup) {
2948		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
2949		return -EINVAL;
2950	}
2951
2952	WARN_ON(hsotg->driver);
2953
2954	driver->driver.bus = NULL;
2955	hsotg->driver = driver;
2956	hsotg->gadget.dev.driver = &driver->driver;
2957	hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
2958	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2959
2960	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
2961				    hsotg->supplies);
2962	if (ret) {
2963		dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
2964		goto err;
2965	}
2966
2967	s3c_hsotg_phy_enable(hsotg);
2968
2969	s3c_hsotg_core_init(hsotg);
2970	hsotg->last_rst = jiffies;
2971	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
2972	return 0;
2973
2974err:
2975	hsotg->driver = NULL;
2976	hsotg->gadget.dev.driver = NULL;
2977	return ret;
2978}
2979
2980/**
2981 * s3c_hsotg_udc_stop - stop the udc
2982 * @gadget: The usb gadget state
2983 * @driver: The usb gadget driver
2984 *
2985 * Stop udc hw block and stay tunned for future transmissions
2986 */
2987static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
2988			  struct usb_gadget_driver *driver)
2989{
2990	struct s3c_hsotg *hsotg = to_hsotg(gadget);
2991	int ep;
2992
2993	if (!hsotg)
2994		return -ENODEV;
2995
2996	if (!driver || driver != hsotg->driver || !driver->unbind)
2997		return -EINVAL;
2998
2999	/* all endpoints should be shutdown */
3000	for (ep = 0; ep < hsotg->num_of_eps; ep++)
3001		s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
3002
3003	s3c_hsotg_phy_disable(hsotg);
3004	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
3005
3006	hsotg->driver = NULL;
3007	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3008	hsotg->gadget.dev.driver = NULL;
3009
3010	dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
3011		 driver->driver.name);
3012
3013	return 0;
3014}
3015
3016/**
3017 * s3c_hsotg_gadget_getframe - read the frame number
3018 * @gadget: The usb gadget state
3019 *
3020 * Read the {micro} frame number
3021 */
3022static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
3023{
3024	return s3c_hsotg_read_frameno(to_hsotg(gadget));
3025}
3026
/* gadget-level operations registered with the UDC core */
static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
	.get_frame	= s3c_hsotg_gadget_getframe,
	.udc_start		= s3c_hsotg_udc_start,
	.udc_stop		= s3c_hsotg_udc_stop,
};
3032
3033/**
3034 * s3c_hsotg_initep - initialise a single endpoint
3035 * @hsotg: The device state.
3036 * @hs_ep: The endpoint to be initialised.
3037 * @epnum: The endpoint number
3038 *
3039 * Initialise the given endpoint (as part of the probe and device state
3040 * creation) to give to the gadget driver. Setup the endpoint name, any
3041 * direction information and other state that may be required.
3042 */
3043static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
3044				       struct s3c_hsotg_ep *hs_ep,
3045				       int epnum)
3046{
3047	u32 ptxfifo;
3048	char *dir;
3049
3050	if (epnum == 0)
3051		dir = "";
3052	else if ((epnum % 2) == 0) {
3053		dir = "out";
3054	} else {
3055		dir = "in";
3056		hs_ep->dir_in = 1;
3057	}
3058
3059	hs_ep->index = epnum;
3060
3061	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
3062
3063	INIT_LIST_HEAD(&hs_ep->queue);
3064	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
3065
3066	spin_lock_init(&hs_ep->lock);
3067
3068	/* add to the list of endpoints known by the gadget driver */
3069	if (epnum)
3070		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
3071
3072	hs_ep->parent = hsotg;
3073	hs_ep->ep.name = hs_ep->name;
3074	hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
3075	hs_ep->ep.ops = &s3c_hsotg_ep_ops;
3076
3077	/*
3078	 * Read the FIFO size for the Periodic TX FIFO, even if we're
3079	 * an OUT endpoint, we may as well do this if in future the
3080	 * code is changed to make each endpoint's direction changeable.
3081	 */
3082
3083	ptxfifo = readl(hsotg->regs + DPTXFSIZn(epnum));
3084	hs_ep->fifo_size = DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;
3085
3086	/*
3087	 * if we're using dma, we need to set the next-endpoint pointer
3088	 * to be something valid.
3089	 */
3090
3091	if (using_dma(hsotg)) {
3092		u32 next = DxEPCTL_NextEp((epnum + 1) % 15);
3093		writel(next, hsotg->regs + DIEPCTL(epnum));
3094		writel(next, hsotg->regs + DOEPCTL(epnum));
3095	}
3096}
3097
3098/**
3099 * s3c_hsotg_hw_cfg - read HW configuration registers
3100 * @param: The device state
3101 *
3102 * Read the USB core HW configuration registers
3103 */
3104static void s3c_hsotg_hw_cfg(struct s3c_hsotg *hsotg)
3105{
3106	u32 cfg2, cfg4;
3107	/* check hardware configuration */
3108
3109	cfg2 = readl(hsotg->regs + 0x48);
3110	hsotg->num_of_eps = (cfg2 >> 10) & 0xF;
3111
3112	dev_info(hsotg->dev, "EPs:%d\n", hsotg->num_of_eps);
3113
3114	cfg4 = readl(hsotg->regs + 0x50);
3115	hsotg->dedicated_fifos = (cfg4 >> 25) & 1;
3116
3117	dev_info(hsotg->dev, "%s fifos\n",
3118		 hsotg->dedicated_fifos ? "dedicated" : "shared");
3119}
3120
3121/**
3122 * s3c_hsotg_dump - dump state of the udc
3123 * @param: The device state
3124 */
3125static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
3126{
3127#ifdef DEBUG
3128	struct device *dev = hsotg->dev;
3129	void __iomem *regs = hsotg->regs;
3130	u32 val;
3131	int idx;
3132
3133	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
3134		 readl(regs + DCFG), readl(regs + DCTL),
3135		 readl(regs + DIEPMSK));
3136
3137	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
3138		 readl(regs + GAHBCFG), readl(regs + 0x44));
3139
3140	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
3141		 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));
3142
3143	/* show periodic fifo settings */
3144
3145	for (idx = 1; idx <= 15; idx++) {
3146		val = readl(regs + DPTXFSIZn(idx));
3147		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
3148			 val >> DPTXFSIZn_DPTxFSize_SHIFT,
3149			 val & DPTXFSIZn_DPTxFStAddr_MASK);
3150	}
3151
3152	for (idx = 0; idx < 15; idx++) {
3153		dev_info(dev,
3154			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
3155			 readl(regs + DIEPCTL(idx)),
3156			 readl(regs + DIEPTSIZ(idx)),
3157			 readl(regs + DIEPDMA(idx)));
3158
3159		val = readl(regs + DOEPCTL(idx));
3160		dev_info(dev,
3161			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
3162			 idx, readl(regs + DOEPCTL(idx)),
3163			 readl(regs + DOEPTSIZ(idx)),
3164			 readl(regs + DOEPDMA(idx)));
3165
3166	}
3167
3168	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
3169		 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
3170#endif
3171}
3172
3173/**
3174 * state_show - debugfs: show overall driver and device state.
3175 * @seq: The seq file to write to.
3176 * @v: Unused parameter.
3177 *
3178 * This debugfs entry shows the overall state of the hardware and
3179 * some general information about each of the endpoints available
3180 * to the system.
3181 */
3182static int state_show(struct seq_file *seq, void *v)
3183{
3184	struct s3c_hsotg *hsotg = seq->private;
3185	void __iomem *regs = hsotg->regs;
3186	int idx;
3187
3188	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
3189		 readl(regs + DCFG),
3190		 readl(regs + DCTL),
3191		 readl(regs + DSTS));
3192
3193	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
3194		   readl(regs + DIEPMSK), readl(regs + DOEPMSK));
3195
3196	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
3197		   readl(regs + GINTMSK),
3198		   readl(regs + GINTSTS));
3199
3200	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
3201		   readl(regs + DAINTMSK),
3202		   readl(regs + DAINT));
3203
3204	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
3205		   readl(regs + GNPTXSTS),
3206		   readl(regs + GRXSTSR));
3207
3208	seq_printf(seq, "\nEndpoint status:\n");
3209
3210	for (idx = 0; idx < 15; idx++) {
3211		u32 in, out;
3212
3213		in = readl(regs + DIEPCTL(idx));
3214		out = readl(regs + DOEPCTL(idx));
3215
3216		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
3217			   idx, in, out);
3218
3219		in = readl(regs + DIEPTSIZ(idx));
3220		out = readl(regs + DOEPTSIZ(idx));
3221
3222		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
3223			   in, out);
3224
3225		seq_printf(seq, "\n");
3226	}
3227
3228	return 0;
3229}
3230
3231static int state_open(struct inode *inode, struct file *file)
3232{
3233	return single_open(file, state_show, inode->i_private);
3234}
3235
/* file operations for the read-only "state" debugfs entry */
static const struct file_operations state_fops = {
	.owner		= THIS_MODULE,
	.open		= state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3243
3244/**
3245 * fifo_show - debugfs: show the fifo information
3246 * @seq: The seq_file to write data to.
3247 * @v: Unused parameter.
3248 *
3249 * Show the FIFO information for the overall fifo and all the
3250 * periodic transmission FIFOs.
3251 */
3252static int fifo_show(struct seq_file *seq, void *v)
3253{
3254	struct s3c_hsotg *hsotg = seq->private;
3255	void __iomem *regs = hsotg->regs;
3256	u32 val;
3257	int idx;
3258
3259	seq_printf(seq, "Non-periodic FIFOs:\n");
3260	seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));
3261
3262	val = readl(regs + GNPTXFSIZ);
3263	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
3264		   val >> GNPTXFSIZ_NPTxFDep_SHIFT,
3265		   val & GNPTXFSIZ_NPTxFStAddr_MASK);
3266
3267	seq_printf(seq, "\nPeriodic TXFIFOs:\n");
3268
3269	for (idx = 1; idx <= 15; idx++) {
3270		val = readl(regs + DPTXFSIZn(idx));
3271
3272		seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
3273			   val >> DPTXFSIZn_DPTxFSize_SHIFT,
3274			   val & DPTXFSIZn_DPTxFStAddr_MASK);
3275	}
3276
3277	return 0;
3278}
3279
3280static int fifo_open(struct inode *inode, struct file *file)
3281{
3282	return single_open(file, fifo_show, inode->i_private);
3283}
3284
/* file operations for the read-only "fifo" debugfs entry */
static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.open		= fifo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3292
3293
/**
 * decode_direction - printable name for an endpoint direction
 * @is_in: Non-zero for an IN endpoint, zero for OUT.
 *
 * Returns the static string "in" or "out" for debug output.
 */
static const char *decode_direction(int is_in)
{
	if (is_in)
		return "in";

	return "out";
}
3298
3299/**
3300 * ep_show - debugfs: show the state of an endpoint.
3301 * @seq: The seq_file to write data to.
3302 * @v: Unused parameter.
3303 *
3304 * This debugfs entry shows the state of the given endpoint (one is
3305 * registered for each available).
3306 */
3307static int ep_show(struct seq_file *seq, void *v)
3308{
3309	struct s3c_hsotg_ep *ep = seq->private;
3310	struct s3c_hsotg *hsotg = ep->parent;
3311	struct s3c_hsotg_req *req;
3312	void __iomem *regs = hsotg->regs;
3313	int index = ep->index;
3314	int show_limit = 15;
3315	unsigned long flags;
3316
3317	seq_printf(seq, "Endpoint index %d, named %s,  dir %s:\n",
3318		   ep->index, ep->ep.name, decode_direction(ep->dir_in));
3319
3320	/* first show the register state */
3321
3322	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
3323		   readl(regs + DIEPCTL(index)),
3324		   readl(regs + DOEPCTL(index)));
3325
3326	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
3327		   readl(regs + DIEPDMA(index)),
3328		   readl(regs + DOEPDMA(index)));
3329
3330	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
3331		   readl(regs + DIEPINT(index)),
3332		   readl(regs + DOEPINT(index)));
3333
3334	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
3335		   readl(regs + DIEPTSIZ(index)),
3336		   readl(regs + DOEPTSIZ(index)));
3337
3338	seq_printf(seq, "\n");
3339	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
3340	seq_printf(seq, "total_data=%ld\n", ep->total_data);
3341
3342	seq_printf(seq, "request list (%p,%p):\n",
3343		   ep->queue.next, ep->queue.prev);
3344
3345	spin_lock_irqsave(&ep->lock, flags);
3346
3347	list_for_each_entry(req, &ep->queue, queue) {
3348		if (--show_limit < 0) {
3349			seq_printf(seq, "not showing more requests...\n");
3350			break;
3351		}
3352
3353		seq_printf(seq, "%c req %p: %d bytes @%p, ",
3354			   req == ep->req ? '*' : ' ',
3355			   req, req->req.length, req->req.buf);
3356		seq_printf(seq, "%d done, res %d\n",
3357			   req->req.actual, req->req.status);
3358	}
3359
3360	spin_unlock_irqrestore(&ep->lock, flags);
3361
3362	return 0;
3363}
3364
3365static int ep_open(struct inode *inode, struct file *file)
3366{
3367	return single_open(file, ep_show, inode->i_private);
3368}
3369
/* file operations for the read-only per-endpoint debugfs entries */
static const struct file_operations ep_fops = {
	.owner		= THIS_MODULE,
	.open		= ep_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3377
3378/**
3379 * s3c_hsotg_create_debug - create debugfs directory and files
3380 * @hsotg: The driver state
3381 *
3382 * Create the debugfs files to allow the user to get information
3383 * about the state of the system. The directory name is created
3384 * with the same name as the device itself, in case we end up
3385 * with multiple blocks in future systems.
3386 */
3387static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
3388{
3389	struct dentry *root;
3390	unsigned epidx;
3391
3392	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
3393	hsotg->debug_root = root;
3394	if (IS_ERR(root)) {
3395		dev_err(hsotg->dev, "cannot create debug root\n");
3396		return;
3397	}
3398
3399	/* create general state file */
3400
3401	hsotg->debug_file = debugfs_create_file("state", 0444, root,
3402						hsotg, &state_fops);
3403
3404	if (IS_ERR(hsotg->debug_file))
3405		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);
3406
3407	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
3408						hsotg, &fifo_fops);
3409
3410	if (IS_ERR(hsotg->debug_fifo))
3411		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);
3412
3413	/* create one file for each endpoint */
3414
3415	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
3416		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3417
3418		ep->debugfs = debugfs_create_file(ep->name, 0444,
3419						  root, ep, &ep_fops);
3420
3421		if (IS_ERR(ep->debugfs))
3422			dev_err(hsotg->dev, "failed to create %s debug file\n",
3423				ep->name);
3424	}
3425}
3426
3427/**
3428 * s3c_hsotg_delete_debug - cleanup debugfs entries
3429 * @hsotg: The driver state
3430 *
3431 * Cleanup (remove) the debugfs files for use on module exit.
3432 */
3433static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
3434{
3435	unsigned epidx;
3436
3437	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
3438		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3439		debugfs_remove(ep->debugfs);
3440	}
3441
3442	debugfs_remove(hsotg->debug_file);
3443	debugfs_remove(hsotg->debug_fifo);
3444	debugfs_remove(hsotg->debug_root);
3445}
3446
3447/**
3448 * s3c_hsotg_release - release callback for hsotg device
3449 * @dev: Device to for which release is called
3450 */
3451static void s3c_hsotg_release(struct device *dev)
3452{
3453	struct s3c_hsotg *hsotg = dev_get_drvdata(dev);
3454
3455	kfree(hsotg);
3456}
3457
3458/**
3459 * s3c_hsotg_probe - probe function for hsotg driver
3460 * @pdev: The platform information for the driver
3461 */
3462
3463static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
3464{
3465	struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
3466	struct device *dev = &pdev->dev;
3467	struct s3c_hsotg_ep *eps;
3468	struct s3c_hsotg *hsotg;
3469	struct resource *res;
3470	int epnum;
3471	int ret;
3472	int i;
3473
3474	plat = pdev->dev.platform_data;
3475	if (!plat) {
3476		dev_err(&pdev->dev, "no platform data defined\n");
3477		return -EINVAL;
3478	}
3479
3480	hsotg = kzalloc(sizeof(struct s3c_hsotg), GFP_KERNEL);
3481	if (!hsotg) {
3482		dev_err(dev, "cannot get memory\n");
3483		return -ENOMEM;
3484	}
3485
3486	hsotg->dev = dev;
3487	hsotg->plat = plat;
3488
3489	hsotg->clk = clk_get(&pdev->dev, "otg");
3490	if (IS_ERR(hsotg->clk)) {
3491		dev_err(dev, "cannot get otg clock\n");
3492		ret = PTR_ERR(hsotg->clk);
3493		goto err_mem;
3494	}
3495
3496	platform_set_drvdata(pdev, hsotg);
3497
3498	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3499	if (!res) {
3500		dev_err(dev, "cannot find register resource 0\n");
3501		ret = -EINVAL;
3502		goto err_clk;
3503	}
3504
3505	hsotg->regs_res = request_mem_region(res->start, resource_size(res),
3506					     dev_name(dev));
3507	if (!hsotg->regs_res) {
3508		dev_err(dev, "cannot reserve registers\n");
3509		ret = -ENOENT;
3510		goto err_clk;
3511	}
3512
3513	hsotg->regs = ioremap(res->start, resource_size(res));
3514	if (!hsotg->regs) {
3515		dev_err(dev, "cannot map registers\n");
3516		ret = -ENXIO;
3517		goto err_regs_res;
3518	}
3519
3520	ret = platform_get_irq(pdev, 0);
3521	if (ret < 0) {
3522		dev_err(dev, "cannot find IRQ\n");
3523		goto err_regs;
3524	}
3525
3526	hsotg->irq = ret;
3527
3528	ret = request_irq(ret, s3c_hsotg_irq, 0, dev_name(dev), hsotg);
3529	if (ret < 0) {
3530		dev_err(dev, "cannot claim IRQ\n");
3531		goto err_regs;
3532	}
3533
3534	dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
3535
3536	device_initialize(&hsotg->gadget.dev);
3537
3538	dev_set_name(&hsotg->gadget.dev, "gadget");
3539
3540	hsotg->gadget.max_speed = USB_SPEED_HIGH;
3541	hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
3542	hsotg->gadget.name = dev_name(dev);
3543
3544	hsotg->gadget.dev.parent = dev;
3545	hsotg->gadget.dev.dma_mask = dev->dma_mask;
3546	hsotg->gadget.dev.release = s3c_hsotg_release;
3547
3548	/* reset the system */
3549
3550	clk_prepare_enable(hsotg->clk);
3551
3552	/* regulators */
3553
3554	for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
3555		hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];
3556
3557	ret = regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
3558				 hsotg->supplies);
3559	if (ret) {
3560		dev_err(dev, "failed to request supplies: %d\n", ret);
3561		goto err_irq;
3562	}
3563
3564	ret = regulator_bulk_enable(ARRAY_SIZE(hsotg->supplies),
3565				    hsotg->supplies);
3566
3567	if (ret) {
3568		dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
3569		goto err_supplies;
3570	}
3571
3572	/* usb phy enable */
3573	s3c_hsotg_phy_enable(hsotg);
3574
3575	s3c_hsotg_corereset(hsotg);
3576	s3c_hsotg_init(hsotg);
3577	s3c_hsotg_hw_cfg(hsotg);
3578
3579	/* hsotg->num_of_eps holds number of EPs other than ep0 */
3580
3581	if (hsotg->num_of_eps == 0) {
3582		dev_err(dev, "wrong number of EPs (zero)\n");
3583		goto err_supplies;
3584	}
3585
3586	eps = kcalloc(hsotg->num_of_eps + 1, sizeof(struct s3c_hsotg_ep),
3587		      GFP_KERNEL);
3588	if (!eps) {
3589		dev_err(dev, "cannot get memory\n");
3590		goto err_supplies;
3591	}
3592
3593	hsotg->eps = eps;
3594
3595	/* setup endpoint information */
3596
3597	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
3598	hsotg->gadget.ep0 = &hsotg->eps[0].ep;
3599
3600	/* allocate EP0 request */
3601
3602	hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
3603						     GFP_KERNEL);
3604	if (!hsotg->ctrl_req) {
3605		dev_err(dev, "failed to allocate ctrl req\n");
3606		goto err_ep_mem;
3607	}
3608
3609	/* initialise the endpoints now the core has been initialised */
3610	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++)
3611		s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
3612
3613	/* disable power and clock */
3614
3615	ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
3616				    hsotg->supplies);
3617	if (ret) {
3618		dev_err(hsotg->dev, "failed to disable supplies: %d\n", ret);
3619		goto err_ep_mem;
3620	}
3621
3622	s3c_hsotg_phy_disable(hsotg);
3623
3624	ret = device_add(&hsotg->gadget.dev);
3625	if (ret) {
3626		put_device(&hsotg->gadget.dev);
3627		goto err_ep_mem;
3628	}
3629
3630	ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
3631	if (ret)
3632		goto err_ep_mem;
3633
3634	s3c_hsotg_create_debug(hsotg);
3635
3636	s3c_hsotg_dump(hsotg);
3637
3638	return 0;
3639
3640err_ep_mem:
3641	kfree(eps);
3642err_supplies:
3643	s3c_hsotg_phy_disable(hsotg);
3644	regulator_bulk_free(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
3645err_irq:
3646	free_irq(hsotg->irq, hsotg);
3647err_regs:
3648	iounmap(hsotg->regs);
3649
3650err_regs_res:
3651	release_resource(hsotg->regs_res);
3652	kfree(hsotg->regs_res);
3653err_clk:
3654	clk_disable_unprepare(hsotg->clk);
3655	clk_put(hsotg->clk);
3656err_mem:
3657	kfree(hsotg);
3658	return ret;
3659}
3660
3661/**
3662 * s3c_hsotg_remove - remove function for hsotg driver
3663 * @pdev: The platform information for the driver
3664 */
3665static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
3666{
3667	struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
3668
3669	usb_del_gadget_udc(&hsotg->gadget);
3670
3671	s3c_hsotg_delete_debug(hsotg);
3672
3673	if (hsotg->driver) {
3674		/* should have been done already by driver model core */
3675		usb_gadget_unregister_driver(hsotg->driver);
3676	}
3677
3678	free_irq(hsotg->irq, hsotg);
3679	iounmap(hsotg->regs);
3680
3681	release_resource(hsotg->regs_res);
3682	kfree(hsotg->regs_res);
3683
3684	s3c_hsotg_phy_disable(hsotg);
3685	regulator_bulk_free(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
3686
3687	clk_disable_unprepare(hsotg->clk);
3688	clk_put(hsotg->clk);
3689
3690	device_unregister(&hsotg->gadget.dev);
3691	return 0;
3692}
3693
/*
 * Power management is not implemented; register NULL callbacks so the
 * platform core skips suspend/resume for this device.  (The previous
 * "#if 1 ... #endif" wrapper was dead scaffolding and has been dropped.)
 */
#define s3c_hsotg_suspend NULL
#define s3c_hsotg_resume NULL
3698
/* platform driver glue: matches the "s3c-hsotg" platform device */
static struct platform_driver s3c_hsotg_driver = {
	.driver		= {
		.name	= "s3c-hsotg",
		.owner	= THIS_MODULE,
	},
	.probe		= s3c_hsotg_probe,
	.remove		= __devexit_p(s3c_hsotg_remove),
	.suspend	= s3c_hsotg_suspend,
	.resume		= s3c_hsotg_resume,
};

/* registers the driver and generates module init/exit boilerplate */
module_platform_driver(s3c_hsotg_driver);

MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c-hsotg");