   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
   4 *		http://www.samsung.com
   5 *
   6 * Copyright 2008 Openmoko, Inc.
   7 * Copyright 2008 Simtec Electronics
   8 *      Ben Dooks <ben@simtec.co.uk>
   9 *      http://armlinux.simtec.co.uk/
  10 *
   11 * S3C USB2.0 High-speed / OTG driver
  12 */
  13
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/platform_device.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/mutex.h>
  21#include <linux/seq_file.h>
  22#include <linux/delay.h>
  23#include <linux/io.h>
  24#include <linux/slab.h>
  25
  26#include <linux/usb/ch9.h>
  27#include <linux/usb/gadget.h>
  28#include <linux/usb/phy.h>
  29#include <linux/usb/composite.h>
  30
  31
  32#include "core.h"
  33#include "hw.h"
  34
  35/* conversion functions */
  36static inline struct dwc2_hsotg_req *our_req(struct usb_request *req)
  37{
  38	return container_of(req, struct dwc2_hsotg_req, req);
  39}
  40
  41static inline struct dwc2_hsotg_ep *our_ep(struct usb_ep *ep)
  42{
  43	return container_of(ep, struct dwc2_hsotg_ep, ep);
  44}
  45
  46static inline struct dwc2_hsotg *to_hsotg(struct usb_gadget *gadget)
  47{
  48	return container_of(gadget, struct dwc2_hsotg, gadget);
  49}
  50
  51static inline void dwc2_set_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
  52{
  53	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) | val, offset);
  54}
  55
  56static inline void dwc2_clear_bit(struct dwc2_hsotg *hsotg, u32 offset, u32 val)
  57{
  58	dwc2_writel(hsotg, dwc2_readl(hsotg, offset) & ~val, offset);
  59}
  60
  61static inline struct dwc2_hsotg_ep *index_to_ep(struct dwc2_hsotg *hsotg,
  62						u32 ep_index, u32 dir_in)
  63{
  64	if (dir_in)
  65		return hsotg->eps_in[ep_index];
  66	else
  67		return hsotg->eps_out[ep_index];
  68}
  69
  70/* forward declaration of functions */
  71static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
  72
  73/**
  74 * using_dma - return the DMA status of the driver.
  75 * @hsotg: The driver state.
  76 *
  77 * Return true if we're using DMA.
  78 *
  79 * Currently, we have the DMA support code worked into everywhere
  80 * that needs it, but the AMBA DMA implementation in the hardware can
  81 * only DMA from 32bit aligned addresses. This means that gadgets such
  82 * as the CDC Ethernet cannot work as they often pass packets which are
  83 * not 32bit aligned.
  84 *
  85 * Unfortunately the choice to use DMA or not is global to the controller
  86 * and seems to be only settable when the controller is being put through
  87 * a core reset. This means we either need to fix the gadgets to take
  88 * account of DMA alignment, or add bounce buffers (yuerk).
  89 *
   90 * g_dma is set depending on the devicetree flag.
  91 */
  92static inline bool using_dma(struct dwc2_hsotg *hsotg)
  93{
  94	return hsotg->params.g_dma;
  95}
  96
  97/*
  98 * using_desc_dma - return the descriptor DMA status of the driver.
  99 * @hsotg: The driver state.
 100 *
 101 * Return true if we're using descriptor DMA.
 102 */
 103static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
 104{
 105	return hsotg->params.g_dma_desc;
 106}
 107
 108/**
 109 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
 110 * @hs_ep: The endpoint
 111 *
 112 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
 113 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
 114 */
 115static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
 116{
 117	struct dwc2_hsotg *hsotg = hs_ep->parent;
 118	u16 limit = DSTS_SOFFN_LIMIT;
 119
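	/*
	 * At high speed DSTS.SOFFN counts microframes; at full/low speed
	 * it counts frames, so the wrap limit is eight times smaller
	 * (hence the shift by 3).
	 */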
 120	if (hsotg->gadget.speed != USB_SPEED_HIGH)
 121		limit >>= 3;
 122
 123	hs_ep->target_frame += hs_ep->interval;
 124	if (hs_ep->target_frame > limit) {
 125		hs_ep->frame_overrun = true;
 126		hs_ep->target_frame &= limit;
 127	} else {
 128		hs_ep->frame_overrun = false;
 129	}
 130}
 131
 132/**
 133 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
 134 *                                    by one.
 135 * @hs_ep: The endpoint.
 136 *
  137 * This function is used in the service interval based scheduling flow to
  138 * calculate the descriptor frame number field value. In service interval
  139 * mode the frame number in the descriptor should point to the last
  140 * (micro)frame in the interval.
 141 */
 142static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
 143{
 144	struct dwc2_hsotg *hsotg = hs_ep->parent;
 145	u16 limit = DSTS_SOFFN_LIMIT;
 146
 147	if (hsotg->gadget.speed != USB_SPEED_HIGH)
 148		limit >>= 3;
 149
 150	if (hs_ep->target_frame)
 151		hs_ep->target_frame -= 1;
 152	else
 153		hs_ep->target_frame = limit;
 154}
 155
 156/**
  157 * dwc2_hsotg_en_gsint - enable one or more of the general interrupts
 158 * @hsotg: The device state
 159 * @ints: A bitmask of the interrupts to enable
 160 */
 161static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
 162{
 163	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
 164	u32 new_gsintmsk;
 165
 166	new_gsintmsk = gsintmsk | ints;
 167
 168	if (new_gsintmsk != gsintmsk) {
 169		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
 170		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
 171	}
 172}
 173
 174/**
  175 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupts
  176 * @hsotg: The device state
  177 * @ints: A bitmask of the interrupts to disable
 178 */
 179static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
 180{
 181	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
 182	u32 new_gsintmsk;
 183
 184	new_gsintmsk = gsintmsk & ~ints;
 185
 186	if (new_gsintmsk != gsintmsk)
 187		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
 188}
 189
 190/**
 191 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
 192 * @hsotg: The device state
 193 * @ep: The endpoint index
 194 * @dir_in: True if direction is in.
 195 * @en: The enable value, true to enable
 196 *
 197 * Set or clear the mask for an individual endpoint's interrupt
 198 * request.
 199 */
 200static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
 201				  unsigned int ep, unsigned int dir_in,
 202				 unsigned int en)
 203{
 204	unsigned long flags;
 205	u32 bit = 1 << ep;
 206	u32 daint;
 207
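	/* DAINTMSK: bits 0..15 mask IN endpoints, bits 16..31 mask OUT endpoints */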
 208	if (!dir_in)
 209		bit <<= 16;
 210
 211	local_irq_save(flags);
 212	daint = dwc2_readl(hsotg, DAINTMSK);
 213	if (en)
 214		daint |= bit;
 215	else
 216		daint &= ~bit;
 217	dwc2_writel(hsotg, daint, DAINTMSK);
 218	local_irq_restore(flags);
 219}
 220
 221/**
 222 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
 223 *
 224 * @hsotg: Programming view of the DWC_otg controller
 225 */
 226int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
 227{
 228	if (hsotg->hw_params.en_multiple_tx_fifo)
 229		/* In dedicated FIFO mode we need count of IN EPs */
 230		return hsotg->hw_params.num_dev_in_eps;
 231	else
 232		/* In shared FIFO mode we need count of Periodic IN EPs */
 233		return hsotg->hw_params.num_dev_perio_in_ep;
 234}
 235
 236/**
 237 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 238 * device mode TX FIFOs
 239 *
 240 * @hsotg: Programming view of the DWC_otg controller
 241 */
 242int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
 243{
 244	int addr;
 245	int tx_addr_max;
 246	u32 np_tx_fifo_size;
 247
 248	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
 249				hsotg->params.g_np_tx_fifo_size);
 250
 251	/* Get Endpoint Info Control block size in DWORDs. */
 252	tx_addr_max = hsotg->hw_params.total_fifo_size;
 253
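	/*
	 * FIFO RAM layout: the RX FIFO sits at address 0, immediately
	 * followed by the non-periodic TX FIFO; whatever remains is
	 * available for the device-mode TX FIFOs. All sizes here are in
	 * 32-bit words.
	 */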
 254	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
 255	if (tx_addr_max <= addr)
 256		return 0;
 257
 258	return tx_addr_max - addr;
 259}
 260
 261/**
 262 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
 263 *
 264 * @hsotg: Programming view of the DWC_otg controller
 265 *
 266 */
 267static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
 268{
 269	u32 gintsts2;
 270	u32 gintmsk2;
 271
 272	gintsts2 = dwc2_readl(hsotg, GINTSTS2);
 273	gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
 274	gintsts2 &= gintmsk2;
 275
 276	if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
 277		dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
 278		dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
 279		dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
 280	}
 281}
 282
 283/**
 284 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
 285 * TX FIFOs
 286 *
 287 * @hsotg: Programming view of the DWC_otg controller
 288 */
 289int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
 290{
 291	int tx_fifo_count;
 292	int tx_fifo_depth;
 293
 294	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);
 295
 296	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
 297
 298	if (!tx_fifo_count)
 299		return tx_fifo_depth;
 300	else
 301		return tx_fifo_depth / tx_fifo_count;
 302}
 303
 304/**
 305 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
 306 * @hsotg: The device instance.
 307 */
 308static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
 309{
 310	unsigned int ep;
 311	unsigned int addr;
 312	int timeout;
 313
 314	u32 val;
 315	u32 *txfsz = hsotg->params.g_tx_fifo_size;
 316
 317	/* Reset fifo map if not correctly cleared during previous session */
 318	WARN_ON(hsotg->fifo_map);
 319	hsotg->fifo_map = 0;
 320
 321	/* set RX/NPTX FIFO sizes */
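	/*
	 * GNPTXFSIZ packs the FIFO start address into its low half and
	 * the FIFO depth into its high half, so the non-periodic TX FIFO
	 * is placed directly after the RX FIFO.
	 */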
 322	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
 323	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
 324		    FIFOSIZE_STARTADDR_SHIFT) |
 325		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
 326		    GNPTXFSIZ);
 327
 328	/*
  329	 * arrange all the rest of the TX FIFOs, as some versions of this
 330	 * block have overlapping default addresses. This also ensures
 331	 * that if the settings have been changed, then they are set to
 332	 * known values.
 333	 */
 334
 335	/* start at the end of the GNPTXFSIZ, rounded up */
 336	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
 337
 338	/*
  339	 * Configure FIFO sizes from the provided configuration and assign
 340	 * them to endpoints dynamically according to maxpacket size value of
 341	 * given endpoint.
 342	 */
 343	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
 344		if (!txfsz[ep])
 345			continue;
 346		val = addr;
 347		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
 348		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
 349			  "insufficient fifo memory");
 350		addr += txfsz[ep];
 351
 352		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
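		/*
		 * dummy read-back, presumably to make sure the write has
		 * taken effect before the next FIFO is programmed
		 */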
 353		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
 354	}
 355
 356	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
 357		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
 358		    GDFIFOCFG);
 359	/*
 360	 * according to p428 of the design guide, we need to ensure that
 361	 * all fifos are flushed before continuing
 362	 */
 363
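	/* TXFNUM 0x10 selects "flush all TX FIFOs" */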
 364	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
 365	       GRSTCTL_RXFFLSH, GRSTCTL);
 366
 367	/* wait until the fifos are both flushed */
 368	timeout = 100;
 369	while (1) {
 370		val = dwc2_readl(hsotg, GRSTCTL);
 371
 372		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
 373			break;
 374
 375		if (--timeout == 0) {
 376			dev_err(hsotg->dev,
 377				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
 378				__func__, val);
 379			break;
 380		}
 381
 382		udelay(1);
 383	}
 384
 385	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
 386}
 387
 388/**
  389 * dwc2_hsotg_ep_alloc_request - allocate a USB request structure
 390 * @ep: USB endpoint to allocate request for.
 391 * @flags: Allocation flags
 392 *
 393 * Allocate a new USB request structure appropriate for the specified endpoint
 394 */
 395static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
 396						       gfp_t flags)
 397{
 398	struct dwc2_hsotg_req *req;
 399
 400	req = kzalloc(sizeof(*req), flags);
 401	if (!req)
 402		return NULL;
 403
 404	INIT_LIST_HEAD(&req->queue);
 405
 406	return &req->req;
 407}
 408
 409/**
 410 * is_ep_periodic - return true if the endpoint is in periodic mode.
 411 * @hs_ep: The endpoint to query.
 412 *
 413 * Returns true if the endpoint is in periodic mode, meaning it is being
 414 * used for an Interrupt or ISO transfer.
 415 */
 416static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
 417{
 418	return hs_ep->periodic;
 419}
 420
 421/**
 422 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
 423 * @hsotg: The device state.
 424 * @hs_ep: The endpoint for the request
 425 * @hs_req: The request being processed.
 426 *
 427 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
 428 * of a request to ensure the buffer is ready for access by the caller.
 429 */
 430static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
 431				 struct dwc2_hsotg_ep *hs_ep,
 432				struct dwc2_hsotg_req *hs_req)
 433{
 434	struct usb_request *req = &hs_req->req;
 435
 436	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
 437}
 438
 439/*
 440 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
 441 * for Control endpoint
 442 * @hsotg: The device state.
 443 *
  444 * This function will allocate 4 descriptor chains for EP 0: 2 for the
  445 * Setup stage, and one each for the IN and OUT data/status transactions.
 446 */
 447static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
 448{
 449	hsotg->setup_desc[0] =
 450		dmam_alloc_coherent(hsotg->dev,
 451				    sizeof(struct dwc2_dma_desc),
 452				    &hsotg->setup_desc_dma[0],
 453				    GFP_KERNEL);
 454	if (!hsotg->setup_desc[0])
 455		goto fail;
 456
 457	hsotg->setup_desc[1] =
 458		dmam_alloc_coherent(hsotg->dev,
 459				    sizeof(struct dwc2_dma_desc),
 460				    &hsotg->setup_desc_dma[1],
 461				    GFP_KERNEL);
 462	if (!hsotg->setup_desc[1])
 463		goto fail;
 464
 465	hsotg->ctrl_in_desc =
 466		dmam_alloc_coherent(hsotg->dev,
 467				    sizeof(struct dwc2_dma_desc),
 468				    &hsotg->ctrl_in_desc_dma,
 469				    GFP_KERNEL);
 470	if (!hsotg->ctrl_in_desc)
 471		goto fail;
 472
 473	hsotg->ctrl_out_desc =
 474		dmam_alloc_coherent(hsotg->dev,
 475				    sizeof(struct dwc2_dma_desc),
 476				    &hsotg->ctrl_out_desc_dma,
 477				    GFP_KERNEL);
 478	if (!hsotg->ctrl_out_desc)
 479		goto fail;
 480
 481	return 0;
 482
 483fail:
 484	return -ENOMEM;
 485}
 486
 487/**
  488 * dwc2_hsotg_write_fifo - write packet data to the TxFIFO
 489 * @hsotg: The controller state.
 490 * @hs_ep: The endpoint we're going to write for.
 491 * @hs_req: The request to write data for.
 492 *
 493 * This is called when the TxFIFO has some space in it to hold a new
 494 * transmission and we have something to give it. The actual setup of
 495 * the data size is done elsewhere, so all we have to do is to actually
 496 * write the data.
 497 *
  498 * The return value is zero if there is more space (or nothing was done),
  499 * otherwise -ENOSPC is returned if the FIFO space was used up.
 500 *
 501 * This routine is only needed for PIO
 502 */
 503static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
 504				 struct dwc2_hsotg_ep *hs_ep,
 505				struct dwc2_hsotg_req *hs_req)
 506{
 507	bool periodic = is_ep_periodic(hs_ep);
 508	u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
 509	int buf_pos = hs_req->req.actual;
 510	int to_write = hs_ep->size_loaded;
 511	void *data;
 512	int can_write;
 513	int pkt_round;
 514	int max_transfer;
 515
 516	to_write -= (buf_pos - hs_ep->last_load);
 517
 518	/* if there's nothing to write, get out early */
 519	if (to_write == 0)
 520		return 0;
 521
 522	if (periodic && !hsotg->dedicated_fifos) {
 523		u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
 524		int size_left;
 525		int size_done;
 526
 527		/*
 528		 * work out how much data was loaded so we can calculate
 529		 * how much data is left in the fifo.
 530		 */
 531
 532		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
 533
 534		/*
 535		 * if shared fifo, we cannot write anything until the
 536		 * previous data has been completely sent.
 537		 */
 538		if (hs_ep->fifo_load != 0) {
 539			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
 540			return -ENOSPC;
 541		}
 542
 543		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
 544			__func__, size_left,
 545			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
 546
 547		/* how much of the data has moved */
 548		size_done = hs_ep->size_loaded - size_left;
 549
 550		/* how much data is left in the fifo */
 551		can_write = hs_ep->fifo_load - size_done;
 552		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
 553			__func__, can_write);
 554
 555		can_write = hs_ep->fifo_size - can_write;
 556		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
 557			__func__, can_write);
 558
 559		if (can_write <= 0) {
 560			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
 561			return -ENOSPC;
 562		}
 563	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
 564		can_write = dwc2_readl(hsotg,
 565				       DTXFSTS(hs_ep->fifo_index));
 566
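		/* DTXFSTS reports the free IN FIFO space in 32-bit words */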
 567		can_write &= 0xffff;
 568		can_write *= 4;
 569	} else {
 570		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
 571			dev_dbg(hsotg->dev,
 572				"%s: no queue slots available (0x%08x)\n",
 573				__func__, gnptxsts);
 574
 575			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
 576			return -ENOSPC;
 577		}
 578
 579		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
 580		can_write *= 4;	/* fifo size is in 32bit quantities. */
 581	}
 582
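	/*
	 * mc is the multiple count: high-bandwidth endpoints can move up
	 * to three packets per (micro)frame, so maxpacket * mc is the
	 * most this transfer can take in one go.
	 */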
 583	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
 584
 585	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
 586		__func__, gnptxsts, can_write, to_write, max_transfer);
 587
 588	/*
  589	 * limit to 512 bytes of data; it seems that at least on the non-periodic
 590	 * FIFO, requests of >512 cause the endpoint to get stuck with a
 591	 * fragment of the end of the transfer in it.
 592	 */
 593	if (can_write > 512 && !periodic)
 594		can_write = 512;
 595
 596	/*
 597	 * limit the write to one max-packet size worth of data, but allow
 598	 * the transfer to return that it did not run out of fifo space
 599	 * doing it.
 600	 */
 601	if (to_write > max_transfer) {
 602		to_write = max_transfer;
 603
 604		/* it's needed only when we do not use dedicated fifos */
 605		if (!hsotg->dedicated_fifos)
 606			dwc2_hsotg_en_gsint(hsotg,
 607					    periodic ? GINTSTS_PTXFEMP :
 608					   GINTSTS_NPTXFEMP);
 609	}
 610
 611	/* see if we can write data */
 612
 613	if (to_write > can_write) {
 614		to_write = can_write;
 615		pkt_round = to_write % max_transfer;
 616
 617		/*
 618		 * Round the write down to an
 619		 * exact number of packets.
 620		 *
 621		 * Note, we do not currently check to see if we can ever
 622		 * write a full packet or not to the FIFO.
 623		 */
 624
 625		if (pkt_round)
 626			to_write -= pkt_round;
 627
 628		/*
 629		 * enable correct FIFO interrupt to alert us when there
 630		 * is more room left.
 631		 */
 632
 633		/* it's needed only when we do not use dedicated fifos */
 634		if (!hsotg->dedicated_fifos)
 635			dwc2_hsotg_en_gsint(hsotg,
 636					    periodic ? GINTSTS_PTXFEMP :
 637					   GINTSTS_NPTXFEMP);
 638	}
 639
 640	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
 641		to_write, hs_req->req.length, can_write, buf_pos);
 642
 643	if (to_write <= 0)
 644		return -ENOSPC;
 645
 646	hs_req->req.actual = buf_pos + to_write;
 647	hs_ep->total_data += to_write;
 648
 649	if (periodic)
 650		hs_ep->fifo_load += to_write;
 651
 652	to_write = DIV_ROUND_UP(to_write, 4);
 653	data = hs_req->req.buf + buf_pos;
 654
 655	dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
 656
 657	return (to_write >= can_write) ? -ENOSPC : 0;
 658}
 659
 660/**
  661 * get_ep_limit - get the maximum data length for this endpoint
 662 * @hs_ep: The endpoint
 663 *
 664 * Return the maximum data that can be queued in one go on a given endpoint
 665 * so that transfers that are too long can be split.
 666 */
 667static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
 668{
 669	int index = hs_ep->index;
 670	unsigned int maxsize;
 671	unsigned int maxpkt;
 672
 673	if (index != 0) {
 674		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
 675		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
 676	} else {
 677		maxsize = 64 + 64;
 678		if (hs_ep->dir_in)
 679			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
 680		else
 681			maxpkt = 2;
 682	}
 683
 684	/* we made the constant loading easier above by using +1 */
 685	maxpkt--;
 686	maxsize--;
 687
 688	/*
 689	 * constrain by packet count if maxpkts*pktsize is greater
 690	 * than the length register size.
 691	 */
 692
 693	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
 694		maxsize = maxpkt * hs_ep->ep.maxpacket;
 695
 696	return maxsize;
 697}
 698
 699/**
 700 * dwc2_hsotg_read_frameno - read current frame number
 701 * @hsotg: The device instance
 702 *
 703 * Return the current frame number
 704 */
 705static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
 706{
 707	u32 dsts;
 708
 709	dsts = dwc2_readl(hsotg, DSTS);
 710	dsts &= DSTS_SOFFN_MASK;
 711	dsts >>= DSTS_SOFFN_SHIFT;
 712
 713	return dsts;
 714}
 715
 716/**
 717 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
  718 * DMA descriptor chain prepared for a specific endpoint
 719 * @hs_ep: The endpoint
 720 *
 721 * Return the maximum data that can be queued in one go on a given endpoint
 722 * depending on its descriptor chain capacity so that transfers that
 723 * are too long can be split.
 724 */
 725static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
 726{
 727	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
 728	int is_isoc = hs_ep->isochronous;
 729	unsigned int maxsize;
 730	u32 mps = hs_ep->ep.maxpacket;
 731	int dir_in = hs_ep->dir_in;
 732
 733	if (is_isoc)
 734		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
 735					   DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
 736					   MAX_DMA_DESC_NUM_HS_ISOC;
 737	else
 738		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
 739
 740	/* Interrupt OUT EP with mps not multiple of 4 */
 741	if (hs_ep->index)
 742		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
 743			maxsize = mps * MAX_DMA_DESC_NUM_GENERIC;
 744
 745	return maxsize;
 746}
 747
 748/*
 749 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
 750 * @hs_ep: The endpoint
 751 * @mask: RX/TX bytes mask to be defined
 752 *
 753 * Returns maximum data payload for one descriptor after analyzing endpoint
 754 * characteristics.
 755 * DMA descriptor transfer bytes limit depends on EP type:
 756 * Control out - MPS,
 757 * Isochronous - descriptor rx/tx bytes bitfield limit,
  758 * Control In/Bulk/Interrupt - multiple of mps. This avoids having data
  759 * from several descriptors concatenated within one packet.
  760 * Interrupt OUT - if mps is not a multiple of 4 then a single packet corresponds
 761 * to a single descriptor.
 762 *
 763 * Selects corresponding mask for RX/TX bytes as well.
 764 */
 765static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
 766{
 767	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
 768	u32 mps = hs_ep->ep.maxpacket;
 769	int dir_in = hs_ep->dir_in;
 770	u32 desc_size = 0;
 771
 772	if (!hs_ep->index && !dir_in) {
 773		desc_size = mps;
 774		*mask = DEV_DMA_NBYTES_MASK;
 775	} else if (hs_ep->isochronous) {
 776		if (dir_in) {
 777			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
 778			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
 779		} else {
 780			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
 781			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
 782		}
 783	} else {
 784		desc_size = DEV_DMA_NBYTES_LIMIT;
 785		*mask = DEV_DMA_NBYTES_MASK;
 786
 787		/* Round down desc_size to be mps multiple */
 788		desc_size -= desc_size % mps;
 789	}
 790
 791	/* Interrupt OUT EP with mps not multiple of 4 */
 792	if (hs_ep->index)
 793		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) {
 794			desc_size = mps;
 795			*mask = DEV_DMA_NBYTES_MASK;
 796		}
 797
 798	return desc_size;
 799}
 800
 801static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
 802						 struct dwc2_dma_desc **desc,
 803						 dma_addr_t dma_buff,
 804						 unsigned int len,
 805						 bool true_last)
 806{
 807	int dir_in = hs_ep->dir_in;
 808	u32 mps = hs_ep->ep.maxpacket;
 809	u32 maxsize = 0;
 810	u32 offset = 0;
 811	u32 mask = 0;
 812	int i;
 813
 814	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
 815
 816	hs_ep->desc_count = (len / maxsize) +
 817				((len % maxsize) ? 1 : 0);
 818	if (len == 0)
 819		hs_ep->desc_count = 1;
 820
 821	for (i = 0; i < hs_ep->desc_count; ++i) {
 822		(*desc)->status = 0;
 823		(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
 824				 << DEV_DMA_BUFF_STS_SHIFT);
 825
 826		if (len > maxsize) {
 827			if (!hs_ep->index && !dir_in)
 828				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
 829
 830			(*desc)->status |=
 831				maxsize << DEV_DMA_NBYTES_SHIFT & mask;
 832			(*desc)->buf = dma_buff + offset;
 833
 834			len -= maxsize;
 835			offset += maxsize;
 836		} else {
 837			if (true_last)
 838				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
 839
 840			if (dir_in)
 841				(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
 842					((hs_ep->send_zlp && true_last) ?
 843					DEV_DMA_SHORT : 0);
 844
 845			(*desc)->status |=
 846				len << DEV_DMA_NBYTES_SHIFT & mask;
 847			(*desc)->buf = dma_buff + offset;
 848		}
 849
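		/*
		 * The entry was built with HBUSY status so the core ignored
		 * it while it was being filled; switching it to HREADY below
		 * hands ownership of the descriptor to the hardware.
		 */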
 850		(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
 851		(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
 852				 << DEV_DMA_BUFF_STS_SHIFT);
 853		(*desc)++;
 854	}
 855}
 856
 857/*
 858 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
 859 * @hs_ep: The endpoint
  860 * @dma_buff: DMA address of the buffer to transfer
  861 * @len: Length of the transfer
  862 *
  863 * This function will iterate over the descriptor chain and fill its
  864 * entries with the corresponding information based on the transfer
  865 * data.
 866 */
 867static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
 868						 dma_addr_t dma_buff,
 869						 unsigned int len)
 870{
 871	struct usb_request *ureq = NULL;
 872	struct dwc2_dma_desc *desc = hs_ep->desc_list;
 873	struct scatterlist *sg;
 874	int i;
 875	u8 desc_count = 0;
 876
 877	if (hs_ep->req)
 878		ureq = &hs_ep->req->req;
 879
 880	/* non-DMA sg buffer */
 881	if (!ureq || !ureq->num_sgs) {
 882		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
 883			dma_buff, len, true);
 884		return;
 885	}
 886
 887	/* DMA sg buffer */
 888	for_each_sg(ureq->sg, sg, ureq->num_sgs, i) {
 889		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
 890			sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
 891			sg_is_last(sg));
 892		desc_count += hs_ep->desc_count;
 893	}
 894
 895	hs_ep->desc_count = desc_count;
 896}
 897
 898/*
 899 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
 900 * @hs_ep: The isochronous endpoint.
 901 * @dma_buff: usb requests dma buffer.
 902 * @len: usb request transfer length.
 903 *
  904 * Fills the next free descriptor with the data of the arrived usb request
  905 * and frame info, sets the Last and IOC bits, and increments next_desc. If
  906 * the filled descriptor is not the first one, removes the L bit from the
  907 * previous descriptor's status.
 908 */
 909static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
 910				      dma_addr_t dma_buff, unsigned int len)
 911{
 912	struct dwc2_dma_desc *desc;
 913	struct dwc2_hsotg *hsotg = hs_ep->parent;
 914	u32 index;
 915	u32 mask = 0;
 916	u8 pid = 0;
 917
 918	dwc2_gadget_get_desc_params(hs_ep, &mask);
 919
 920	index = hs_ep->next_desc;
 921	desc = &hs_ep->desc_list[index];
 922
  923	/* Check if descriptor chain full - HREADY means still owned by the core */
 924	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
 925	    DEV_DMA_BUFF_STS_HREADY) {
 926		dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
 927		return 1;
 928	}
 929
  930	/* Clear L bit of previous desc if more than one entry in the chain */
 931	if (hs_ep->next_desc)
 932		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
 933
 934	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
 935		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
 936
 937	desc->status = 0;
 938	desc->status |= (DEV_DMA_BUFF_STS_HBUSY	<< DEV_DMA_BUFF_STS_SHIFT);
 939
 940	desc->buf = dma_buff;
 941	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
 942			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));
 943
 944	if (hs_ep->dir_in) {
 945		if (len)
 946			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
 947		else
 948			pid = 1;
 949		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
 950				 DEV_DMA_ISOC_PID_MASK) |
 951				((len % hs_ep->ep.maxpacket) ?
 952				 DEV_DMA_SHORT : 0) |
 953				((hs_ep->target_frame <<
 954				  DEV_DMA_ISOC_FRNUM_SHIFT) &
 955				 DEV_DMA_ISOC_FRNUM_MASK);
 956	}
 957
 958	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
 959	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
 960
 961	/* Increment frame number by interval for IN */
 962	if (hs_ep->dir_in)
 963		dwc2_gadget_incr_frame_num(hs_ep);
 964
 965	/* Update index of last configured entry in the chain */
 966	hs_ep->next_desc++;
 967	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
 968		hs_ep->next_desc = 0;
 969
 970	return 0;
 971}
 972
 973/*
 974 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
 975 * @hs_ep: The isochronous endpoint.
 976 *
 977 * Prepare descriptor chain for isochronous endpoints. Afterwards
 978 * write DMA address to HW and enable the endpoint.
 979 */
 980static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
 981{
 982	struct dwc2_hsotg *hsotg = hs_ep->parent;
 983	struct dwc2_hsotg_req *hs_req, *treq;
 984	int index = hs_ep->index;
 985	int ret;
 986	int i;
 987	u32 dma_reg;
 988	u32 depctl;
 989	u32 ctrl;
 990	struct dwc2_dma_desc *desc;
 991
 992	if (list_empty(&hs_ep->queue)) {
 993		hs_ep->target_frame = TARGET_FRAME_INITIAL;
 994		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
 995		return;
 996	}
 997
  998	/* Initialize the descriptor chain with Host Busy status */
 999	for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
1000		desc = &hs_ep->desc_list[i];
1001		desc->status = 0;
1002		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
1003				    << DEV_DMA_BUFF_STS_SHIFT);
1004	}
1005
1006	hs_ep->next_desc = 0;
1007	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
1008		dma_addr_t dma_addr = hs_req->req.dma;
1009
1010		if (hs_req->req.num_sgs) {
1011			WARN_ON(hs_req->req.num_sgs > 1);
1012			dma_addr = sg_dma_address(hs_req->req.sg);
1013		}
1014		ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
1015						 hs_req->req.length);
1016		if (ret)
1017			break;
1018	}
1019
1020	hs_ep->compl_desc = 0;
1021	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
1022	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
1023
1024	/* write descriptor chain address to control register */
1025	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1026
1027	ctrl = dwc2_readl(hsotg, depctl);
1028	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
1029	dwc2_writel(hsotg, ctrl, depctl);
1030}
1031
1032static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
1033static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
1034					struct dwc2_hsotg_ep *hs_ep,
1035				       struct dwc2_hsotg_req *hs_req,
1036				       int result);
1037
1038/**
1039 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
1040 * @hsotg: The controller state.
1041 * @hs_ep: The endpoint to process a request for
1042 * @hs_req: The request to start.
1043 * @continuing: True if we are doing more for the current request.
1044 *
1045 * Start the given request running by setting the endpoint registers
1046 * appropriately, and writing any data to the FIFOs.
1047 */
1048static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
1049				 struct dwc2_hsotg_ep *hs_ep,
1050				struct dwc2_hsotg_req *hs_req,
1051				bool continuing)
1052{
1053	struct usb_request *ureq = &hs_req->req;
1054	int index = hs_ep->index;
1055	int dir_in = hs_ep->dir_in;
1056	u32 epctrl_reg;
1057	u32 epsize_reg;
1058	u32 epsize;
1059	u32 ctrl;
1060	unsigned int length;
1061	unsigned int packets;
1062	unsigned int maxreq;
1063	unsigned int dma_reg;
1064
1065	if (index != 0) {
1066		if (hs_ep->req && !continuing) {
1067			dev_err(hsotg->dev, "%s: active request\n", __func__);
1068			WARN_ON(1);
1069			return;
1070		} else if (hs_ep->req != hs_req && continuing) {
1071			dev_err(hsotg->dev,
1072				"%s: continue different req\n", __func__);
1073			WARN_ON(1);
1074			return;
1075		}
1076	}
1077
1078	dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
1079	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
1080	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
1081
1082	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
1083		__func__, dwc2_readl(hsotg, epctrl_reg), index,
1084		hs_ep->dir_in ? "in" : "out");
1085
1086	/* If endpoint is stalled, we will restart request later */
1087	ctrl = dwc2_readl(hsotg, epctrl_reg);
1088
1089	if (index && ctrl & DXEPCTL_STALL) {
1090		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
1091		return;
1092	}
1093
1094	length = ureq->length - ureq->actual;
1095	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
1096		ureq->length, ureq->actual);
1097
1098	if (!using_desc_dma(hsotg))
1099		maxreq = get_ep_limit(hs_ep);
1100	else
1101		maxreq = dwc2_gadget_get_chain_limit(hs_ep);
1102
1103	if (length > maxreq) {
1104		int round = maxreq % hs_ep->ep.maxpacket;
1105
1106		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
1107			__func__, length, maxreq, round);
1108
1109		/* round down to multiple of packets */
1110		if (round)
1111			maxreq -= round;
1112
1113		length = maxreq;
1114	}
1115
1116	if (length)
1117		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
1118	else
1119		packets = 1;	/* send one packet if length is zero. */
1120
1121	if (dir_in && index != 0)
1122		if (hs_ep->isochronous)
1123			epsize = DXEPTSIZ_MC(packets);
1124		else
1125			epsize = DXEPTSIZ_MC(1);
1126	else
1127		epsize = 0;
1128
1129	/*
1130	 * zero length packet should be programmed on its own and should not
1131	 * be counted in DIEPTSIZ.PktCnt with other packets.
1132	 */
1133	if (dir_in && ureq->zero && !continuing) {
1134		/* Test if zlp is actually required. */
1135		if ((ureq->length >= hs_ep->ep.maxpacket) &&
1136		    !(ureq->length % hs_ep->ep.maxpacket))
1137			hs_ep->send_zlp = 1;
1138	}
1139
1140	epsize |= DXEPTSIZ_PKTCNT(packets);
1141	epsize |= DXEPTSIZ_XFERSIZE(length);
1142
1143	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
1144		__func__, packets, length, ureq->length, epsize, epsize_reg);
1145
1146	/* store the request as the current one we're doing */
1147	hs_ep->req = hs_req;
1148
1149	if (using_desc_dma(hsotg)) {
1150		u32 offset = 0;
1151		u32 mps = hs_ep->ep.maxpacket;
1152
1153		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
1154		if (!dir_in) {
1155			if (!index)
1156				length = mps;
1157			else if (length % mps)
1158				length += (mps - (length % mps));
1159		}
1160
1161		if (continuing)
1162			offset = ureq->actual;
1163
1164		/* Fill DDMA chain entries */
1165		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
1166						     length);
1167
1168		/* write descriptor chain address to control register */
1169		dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1170
1171		dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
1172			__func__, (u32)hs_ep->desc_list_dma, dma_reg);
1173	} else {
1174		/* write size / packets */
1175		dwc2_writel(hsotg, epsize, epsize_reg);
1176
1177		if (using_dma(hsotg) && !continuing && (length != 0)) {
1178			/*
1179			 * write DMA address to control register, buffer
1180			 * already synced by dwc2_hsotg_ep_queue().
1181			 */
1182
1183			dwc2_writel(hsotg, ureq->dma, dma_reg);
1184
1185			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
1186				__func__, &ureq->dma, dma_reg);
1187		}
1188	}
1189
1190	if (hs_ep->isochronous) {
1191		if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
1192			if (hs_ep->interval == 1) {
1193				if (hs_ep->target_frame & 0x1)
1194					ctrl |= DXEPCTL_SETODDFR;
1195				else
1196					ctrl |= DXEPCTL_SETEVENFR;
1197			}
1198			ctrl |= DXEPCTL_CNAK;
1199		} else {
1200			hs_req->req.frame_number = hs_ep->target_frame;
1201			hs_req->req.actual = 0;
1202			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1203			return;
1204		}
1205	}
1206
1207	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */
1208
1209	dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
1210
1211	/* For Setup request do not clear NAK */
1212	if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
1213		ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */
1214
1215	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
1216	dwc2_writel(hsotg, ctrl, epctrl_reg);
1217
1218	/*
1219	 * set these, it seems that DMA support increments past the end
1220	 * of the packet buffer so we need to calculate the length from
1221	 * this information.
1222	 */
1223	hs_ep->size_loaded = length;
1224	hs_ep->last_load = ureq->actual;
1225
1226	if (dir_in && !using_dma(hsotg)) {
1227		/* set these anyway, we may need them for non-periodic in */
1228		hs_ep->fifo_load = 0;
1229
1230		dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1231	}
1232
1233	/*
1234	 * Note, trying to clear the NAK here causes problems with transmit
1235	 * on the S3C6400 ending up with the TXFIFO becoming full.
1236	 */
1237
1238	/* check ep is enabled */
1239	if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
1240		dev_dbg(hsotg->dev,
1241			"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
1242			 index, dwc2_readl(hsotg, epctrl_reg));
1243
1244	dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
1245		__func__, dwc2_readl(hsotg, epctrl_reg));
1246
1247	/* enable ep interrupts */
1248	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
1249}
1250
1251/**
1252 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
1253 * @hsotg: The device state.
1254 * @hs_ep: The endpoint the request is on.
1255 * @req: The request being processed.
1256 *
1257 * We've been asked to queue a request, so ensure that the memory buffer
1258 * is correctly setup for DMA. If we've been passed an extant DMA address
1259 * then ensure the buffer has been synced to memory. If our buffer has no
1260 * DMA memory, then we map the memory and mark our request to allow us to
1261 * cleanup on completion.
1262 */
1263static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
1264			      struct dwc2_hsotg_ep *hs_ep,
1265			     struct usb_request *req)
1266{
1267	int ret;
1268
1269	hs_ep->map_dir = hs_ep->dir_in;
1270	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
1271	if (ret)
1272		goto dma_error;
1273
1274	return 0;
1275
1276dma_error:
1277	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
1278		__func__, req->buf, req->length);
1279
1280	return -EIO;
1281}
1282
1283static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
1284						 struct dwc2_hsotg_ep *hs_ep,
1285						 struct dwc2_hsotg_req *hs_req)
1286{
1287	void *req_buf = hs_req->req.buf;
1288
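	/*
	 * The DMA engine can only handle 32-bit aligned buffers, so
	 * unaligned requests are bounced through a temporary allocation.
	 */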
1289	/* If dma is not being used or buffer is aligned */
1290	if (!using_dma(hsotg) || !((long)req_buf & 3))
1291		return 0;
1292
1293	WARN_ON(hs_req->saved_req_buf);
1294
1295	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
1296		hs_ep->ep.name, req_buf, hs_req->req.length);
1297
1298	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1299	if (!hs_req->req.buf) {
1300		hs_req->req.buf = req_buf;
1301		dev_err(hsotg->dev,
1302			"%s: unable to allocate memory for bounce buffer\n",
1303			__func__);
1304		return -ENOMEM;
1305	}
1306
1307	/* Save actual buffer */
1308	hs_req->saved_req_buf = req_buf;
1309
1310	if (hs_ep->dir_in)
1311		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1312	return 0;
1313}
1314
1315static void
1316dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
1317					 struct dwc2_hsotg_ep *hs_ep,
1318					 struct dwc2_hsotg_req *hs_req)
1319{
1320	/* If dma is not being used or buffer was aligned */
1321	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1322		return;
1323
1324	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
1325		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1326
1327	/* Copy data from bounce buffer on successful out transfer */
1328	if (!hs_ep->dir_in && !hs_req->req.status)
1329		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1330		       hs_req->req.actual);
1331
1332	/* Free bounce buffer */
1333	kfree(hs_req->req.buf);
1334
1335	hs_req->req.buf = hs_req->saved_req_buf;
1336	hs_req->saved_req_buf = NULL;
1337}
1338
1339/**
1340 * dwc2_gadget_target_frame_elapsed - Checks target frame
1341 * @hs_ep: The driver endpoint to check
1342 *
 1343 * Returns true if the targeted frame has elapsed; in that case the
 1344 * corresponding transfer must be dropped.
1345 */
1346static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
1347{
1348	struct dwc2_hsotg *hsotg = hs_ep->parent;
1349	u32 target_frame = hs_ep->target_frame;
1350	u32 current_frame = hsotg->frame_number;
1351	bool frame_overrun = hs_ep->frame_overrun;
1352	u16 limit = DSTS_SOFFN_LIMIT;
1353
1354	if (hsotg->gadget.speed != USB_SPEED_HIGH)
1355		limit >>= 3;
1356
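	/*
	 * Once target_frame has wrapped past the SOFFN limit a plain ">="
	 * test would misfire, so after an overrun the frame only counts as
	 * elapsed when the current frame has wrapped as well and sits
	 * within half the counter range above the target.
	 */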
1357	if (!frame_overrun && current_frame >= target_frame)
1358		return true;
1359
1360	if (frame_overrun && current_frame >= target_frame &&
1361	    ((current_frame - target_frame) < limit / 2))
1362		return true;
1363
1364	return false;
1365}
1366
1367/*
1368 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
1369 * @hsotg: The driver state
1370 * @hs_ep: the ep descriptor chain is for
1371 *
 1372 * Called to update the EP0 structure's pointers depending on the stage
 1373 * of the control transfer.
1374 */
1375static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
1376					  struct dwc2_hsotg_ep *hs_ep)
1377{
1378	switch (hsotg->ep0_state) {
1379	case DWC2_EP0_SETUP:
1380	case DWC2_EP0_STATUS_OUT:
1381		hs_ep->desc_list = hsotg->setup_desc[0];
1382		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
1383		break;
1384	case DWC2_EP0_DATA_IN:
1385	case DWC2_EP0_STATUS_IN:
1386		hs_ep->desc_list = hsotg->ctrl_in_desc;
1387		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
1388		break;
1389	case DWC2_EP0_DATA_OUT:
1390		hs_ep->desc_list = hsotg->ctrl_out_desc;
1391		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
1392		break;
1393	default:
1394		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
1395			hsotg->ep0_state);
1396		return -EINVAL;
1397	}
1398
1399	return 0;
1400}
1401
1402static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
1403			       gfp_t gfp_flags)
1404{
1405	struct dwc2_hsotg_req *hs_req = our_req(req);
1406	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1407	struct dwc2_hsotg *hs = hs_ep->parent;
1408	bool first;
1409	int ret;
1410	u32 maxsize = 0;
1411	u32 mask = 0;
1412
1413
1414	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
1415		ep->name, req, req->length, req->buf, req->no_interrupt,
1416		req->zero, req->short_not_ok);
1417
1418	/* Prevent new request submission when controller is suspended */
1419	if (hs->lx_state != DWC2_L0) {
1420		dev_dbg(hs->dev, "%s: submit request only in active state\n",
1421			__func__);
1422		return -EAGAIN;
1423	}
1424
1425	/* initialise status of the request */
1426	INIT_LIST_HEAD(&hs_req->queue);
1427	req->actual = 0;
1428	req->status = -EINPROGRESS;
1429
1430	/* Don't queue ISOC request if length greater than mps*mc */
1431	if (hs_ep->isochronous &&
1432	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
1433		dev_err(hs->dev, "req length > maxpacket*mc\n");
1434		return -EINVAL;
1435	}
1436
 1437	/* In DDMA mode for ISOCs, don't queue the request if its length is
 1438	 * greater than the descriptor limits.
 1439	 */
1440	if (using_desc_dma(hs) && hs_ep->isochronous) {
1441		maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
1442		if (hs_ep->dir_in && req->length > maxsize) {
1443			dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
1444				req->length, maxsize);
1445			return -EINVAL;
1446		}
1447
1448		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
1449			dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
1450				req->length, hs_ep->ep.maxpacket);
1451			return -EINVAL;
1452		}
1453	}
1454
1455	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
1456	if (ret)
1457		return ret;
1458
1459	/* if we're using DMA, sync the buffers as necessary */
1460	if (using_dma(hs)) {
1461		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
1462		if (ret)
1463			return ret;
1464	}
1465	/* If using descriptor DMA configure EP0 descriptor chain pointers */
1466	if (using_desc_dma(hs) && !hs_ep->index) {
1467		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
1468		if (ret)
1469			return ret;
1470	}
1471
1472	first = list_empty(&hs_ep->queue);
1473	list_add_tail(&hs_req->queue, &hs_ep->queue);
1474
1475	/*
1476	 * Handle DDMA isochronous transfers separately - just add new entry
1477	 * to the descriptor chain.
1478	 * Transfer will be started once SW gets either one of NAK or
1479	 * OutTknEpDis interrupts.
1480	 */
1481	if (using_desc_dma(hs) && hs_ep->isochronous) {
1482		if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
1483			dma_addr_t dma_addr = hs_req->req.dma;
1484
1485			if (hs_req->req.num_sgs) {
1486				WARN_ON(hs_req->req.num_sgs > 1);
1487				dma_addr = sg_dma_address(hs_req->req.sg);
1488			}
1489			dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
1490						   hs_req->req.length);
1491		}
1492		return 0;
1493	}
1494
1495	/* Change EP direction if status phase request is after data out */
1496	if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
1497	    hs->ep0_state == DWC2_EP0_DATA_OUT)
1498		hs_ep->dir_in = 1;
1499
1500	if (first) {
1501		if (!hs_ep->isochronous) {
1502			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1503			return 0;
1504		}
1505
1506		/* Update current frame number value. */
1507		hs->frame_number = dwc2_hsotg_read_frameno(hs);
1508		while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
1509			dwc2_gadget_incr_frame_num(hs_ep);
1510			/* Update current frame number value once more as it
1511			 * changes here.
1512			 */
1513			hs->frame_number = dwc2_hsotg_read_frameno(hs);
1514		}
1515
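		/*
		 * target_frame now points at the first frame that is still
		 * in the future, so the request below is scheduled into a
		 * frame the core can actually hit.
		 */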
1516		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
1517			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1518	}
1519	return 0;
1520}
1521
1522static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
1523				    gfp_t gfp_flags)
1524{
1525	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1526	struct dwc2_hsotg *hs = hs_ep->parent;
1527	unsigned long flags;
1528	int ret;
1529
1530	spin_lock_irqsave(&hs->lock, flags);
1531	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
1532	spin_unlock_irqrestore(&hs->lock, flags);
1533
1534	return ret;
1535}
1536
1537static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
1538				       struct usb_request *req)
1539{
1540	struct dwc2_hsotg_req *hs_req = our_req(req);
1541
1542	kfree(hs_req);
1543}
1544
1545/**
1546 * dwc2_hsotg_complete_oursetup - setup completion callback
1547 * @ep: The endpoint the request was on.
1548 * @req: The request completed.
1549 *
1550 * Called on completion of any requests the driver itself
1551 * submitted that need cleaning up.
1552 */
1553static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
1554					 struct usb_request *req)
1555{
1556	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1557	struct dwc2_hsotg *hsotg = hs_ep->parent;
1558
1559	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
1560
1561	dwc2_hsotg_ep_free_request(ep, req);
1562}
1563
1564/**
1565 * ep_from_windex - convert control wIndex value to endpoint
1566 * @hsotg: The driver state.
1567 * @windex: The control request wIndex field (in host order).
1568 *
 1569 * Convert the given wIndex into a pointer to a driver endpoint
1570 * structure, or return NULL if it is not a valid endpoint.
1571 */
1572static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
1573					    u32 windex)
1574{
1575	int dir = (windex & USB_DIR_IN) ? 1 : 0;
1576	int idx = windex & 0x7F;
1577
1578	if (windex >= 0x100)
1579		return NULL;
1580
1581	if (idx > hsotg->num_of_eps)
1582		return NULL;
1583
1584	return index_to_ep(hsotg, idx, dir);
1585}
1586
1587/**
 1588 * dwc2_hsotg_set_test_mode - Enable USB test modes
 1589 * @hsotg: The driver state.
 1590 * @testmode: requested USB test mode
 1591 * Enable the USB test mode requested by the host.
1592 */
1593int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
1594{
1595	int dctl = dwc2_readl(hsotg, DCTL);
1596
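	/* the standard test selector values (1..5) map directly onto DCTL.TstCtl */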
1597	dctl &= ~DCTL_TSTCTL_MASK;
1598	switch (testmode) {
1599	case USB_TEST_J:
1600	case USB_TEST_K:
1601	case USB_TEST_SE0_NAK:
1602	case USB_TEST_PACKET:
1603	case USB_TEST_FORCE_ENABLE:
1604		dctl |= testmode << DCTL_TSTCTL_SHIFT;
1605		break;
1606	default:
1607		return -EINVAL;
1608	}
1609	dwc2_writel(hsotg, dctl, DCTL);
1610	return 0;
1611}
1612
1613/**
1614 * dwc2_hsotg_send_reply - send reply to control request
1615 * @hsotg: The device state
1616 * @ep: Endpoint 0
1617 * @buff: Buffer for request
1618 * @length: Length of reply.
1619 *
1620 * Create a request and queue it on the given endpoint. This is useful as
1621 * an internal method of sending replies to certain control requests, etc.
1622 */
1623static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
1624				 struct dwc2_hsotg_ep *ep,
1625				void *buff,
1626				int length)
1627{
1628	struct usb_request *req;
1629	int ret;
1630
1631	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
1632
1633	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
1634	hsotg->ep0_reply = req;
1635	if (!req) {
1636		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
1637		return -ENOMEM;
1638	}
1639
1640	req->buf = hsotg->ep0_buff;
1641	req->length = length;
1642	/*
1643	 * zero flag is for sending zlp in DATA IN stage. It has no impact on
1644	 * STATUS stage.
1645	 */
1646	req->zero = 0;
1647	req->complete = dwc2_hsotg_complete_oursetup;
1648
1649	if (length)
1650		memcpy(req->buf, buff, length);
1651
1652	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
1653	if (ret) {
1654		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
1655		return ret;
1656	}
1657
1658	return 0;
1659}
1660
1661/**
1662 * dwc2_hsotg_process_req_status - process request GET_STATUS
1663 * @hsotg: The device state
1664 * @ctrl: USB control request
1665 */
1666static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
1667					 struct usb_ctrlrequest *ctrl)
1668{
1669	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1670	struct dwc2_hsotg_ep *ep;
1671	__le16 reply;
1672	u16 status;
1673	int ret;
1674
1675	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
1676
1677	if (!ep0->dir_in) {
1678		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
1679		return -EINVAL;
1680	}
1681
1682	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1683	case USB_RECIP_DEVICE:
1684		status = hsotg->gadget.is_selfpowered <<
1685			 USB_DEVICE_SELF_POWERED;
1686		status |= hsotg->remote_wakeup_allowed <<
1687			  USB_DEVICE_REMOTE_WAKEUP;
1688		reply = cpu_to_le16(status);
1689		break;
1690
1691	case USB_RECIP_INTERFACE:
1692		/* currently, the data result should be zero */
1693		reply = cpu_to_le16(0);
1694		break;
1695
1696	case USB_RECIP_ENDPOINT:
1697		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1698		if (!ep)
1699			return -ENOENT;
1700
1701		reply = cpu_to_le16(ep->halted ? 1 : 0);
1702		break;
1703
1704	default:
1705		return 0;
1706	}
1707
1708	if (le16_to_cpu(ctrl->wLength) != 2)
1709		return -EINVAL;
1710
1711	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
1712	if (ret) {
1713		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1714		return ret;
1715	}
1716
1717	return 1;
1718}
1719
1720static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
1721
1722/**
1723 * get_ep_head - return the first request on the endpoint
1724 * @hs_ep: The controller endpoint to get
1725 *
1726 * Get the first request on the endpoint.
1727 */
1728static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
1729{
1730	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
1731					queue);
1732}
1733
1734/**
1735 * dwc2_gadget_start_next_request - Starts next request from ep queue
1736 * @hs_ep: Endpoint structure
1737 *
1738 * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
1739 * in its handler. Hence we need to unmask it here to be able to do
1740 * resynchronization.
1741 */
1742static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
1743{
1744	struct dwc2_hsotg *hsotg = hs_ep->parent;
1745	int dir_in = hs_ep->dir_in;
1746	struct dwc2_hsotg_req *hs_req;
1747
1748	if (!list_empty(&hs_ep->queue)) {
1749		hs_req = get_ep_head(hs_ep);
1750		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1751		return;
1752	}
1753	if (!hs_ep->isochronous)
1754		return;
1755
1756	if (dir_in) {
1757		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
1758			__func__);
1759	} else {
1760		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
1761			__func__);
1762	}
1763}
1764
1765/**
1766 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
1767 * @hsotg: The device state
1768 * @ctrl: USB control request
1769 */
1770static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
1771					  struct usb_ctrlrequest *ctrl)
1772{
1773	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1774	struct dwc2_hsotg_req *hs_req;
1775	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
1776	struct dwc2_hsotg_ep *ep;
1777	int ret;
1778	bool halted;
1779	u32 recip;
1780	u32 wValue;
1781	u32 wIndex;
1782
1783	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
1784		__func__, set ? "SET" : "CLEAR");
1785
1786	wValue = le16_to_cpu(ctrl->wValue);
1787	wIndex = le16_to_cpu(ctrl->wIndex);
1788	recip = ctrl->bRequestType & USB_RECIP_MASK;
1789
1790	switch (recip) {
1791	case USB_RECIP_DEVICE:
1792		switch (wValue) {
1793		case USB_DEVICE_REMOTE_WAKEUP:
1794			if (set)
1795				hsotg->remote_wakeup_allowed = 1;
1796			else
1797				hsotg->remote_wakeup_allowed = 0;
1798			break;
1799
1800		case USB_DEVICE_TEST_MODE:
1801			if ((wIndex & 0xff) != 0)
1802				return -EINVAL;
1803			if (!set)
1804				return -EINVAL;
1805
1806			hsotg->test_mode = wIndex >> 8;
1807			break;
1808		default:
1809			return -ENOENT;
1810		}
1811
1812		ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1813		if (ret) {
1814			dev_err(hsotg->dev,
1815				"%s: failed to send reply\n", __func__);
1816			return ret;
1817		}
1818		break;
1819
1820	case USB_RECIP_ENDPOINT:
1821		ep = ep_from_windex(hsotg, wIndex);
1822		if (!ep) {
1823			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
1824				__func__, wIndex);
1825			return -ENOENT;
1826		}
1827
1828		switch (wValue) {
1829		case USB_ENDPOINT_HALT:
1830			halted = ep->halted;
1831
1832			if (!ep->wedged)
1833				dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
1834
1835			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1836			if (ret) {
1837				dev_err(hsotg->dev,
1838					"%s: failed to send reply\n", __func__);
1839				return ret;
1840			}
1841
1842			/*
1843			 * we have to complete all requests for ep if it was
1844			 * halted, and the halt was cleared by CLEAR_FEATURE
1845			 */
1846
1847			if (!set && halted) {
1848				/*
1849				 * If we have request in progress,
1850				 * then complete it
1851				 */
1852				if (ep->req) {
1853					hs_req = ep->req;
1854					ep->req = NULL;
1855					list_del_init(&hs_req->queue);
1856					if (hs_req->req.complete) {
1857						spin_unlock(&hsotg->lock);
1858						usb_gadget_giveback_request(
1859							&ep->ep, &hs_req->req);
1860						spin_lock(&hsotg->lock);
1861					}
1862				}
1863
1864				/* If we have pending request, then start it */
1865				if (!ep->req)
1866					dwc2_gadget_start_next_request(ep);
1867			}
1868
1869			break;
1870
1871		default:
1872			return -ENOENT;
1873		}
1874		break;
1875	default:
1876		return -ENOENT;
1877	}
1878	return 1;
1879}
1880
1881static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
1882
1883/**
1884 * dwc2_hsotg_stall_ep0 - stall ep0
1885 * @hsotg: The device state
1886 *
1887 * Set stall for ep0 as response for setup request.
1888 */
1889static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
1890{
1891	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1892	u32 reg;
1893	u32 ctrl;
1894
1895	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1896	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1897
1898	/*
1899	 * DxEPCTL_Stall will be cleared by EP once it has
1900	 * taken effect, so no need to clear later.
1901	 */
1902
1903	ctrl = dwc2_readl(hsotg, reg);
1904	ctrl |= DXEPCTL_STALL;
1905	ctrl |= DXEPCTL_CNAK;
1906	dwc2_writel(hsotg, ctrl, reg);
1907
1908	dev_dbg(hsotg->dev,
1909		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
1910		ctrl, reg, dwc2_readl(hsotg, reg));
1911
1912	/*
1913	 * complete won't be called, so we enqueue
1914	 * setup request here
1915	 */
1916	dwc2_hsotg_enqueue_setup(hsotg);
1917}
1918
1919/**
1920 * dwc2_hsotg_process_control - process a control request
1921 * @hsotg: The device state
1922 * @ctrl: The control request received
1923 *
1924 * The controller has received the SETUP phase of a control request, and
1925 * needs to work out what to do next (and whether to pass it on to the
1926 * gadget driver).
1927 */
1928static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
1929				       struct usb_ctrlrequest *ctrl)
1930{
1931	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1932	int ret = 0;
1933	u32 dcfg;
1934
1935	dev_dbg(hsotg->dev,
1936		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
1937		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
1938		ctrl->wIndex, ctrl->wLength);
1939
1940	if (ctrl->wLength == 0) {
1941		ep0->dir_in = 1;
1942		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
1943	} else if (ctrl->bRequestType & USB_DIR_IN) {
1944		ep0->dir_in = 1;
1945		hsotg->ep0_state = DWC2_EP0_DATA_IN;
1946	} else {
1947		ep0->dir_in = 0;
1948		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
1949	}
1950
1951	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1952		switch (ctrl->bRequest) {
1953		case USB_REQ_SET_ADDRESS:
1954			hsotg->connected = 1;
1955			dcfg = dwc2_readl(hsotg, DCFG);
1956			dcfg &= ~DCFG_DEVADDR_MASK;
1957			dcfg |= (le16_to_cpu(ctrl->wValue) <<
1958				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
1959			dwc2_writel(hsotg, dcfg, DCFG);
1960
1961			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1962
1963			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1964			return;
1965
1966		case USB_REQ_GET_STATUS:
1967			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
1968			break;
1969
1970		case USB_REQ_CLEAR_FEATURE:
1971		case USB_REQ_SET_FEATURE:
1972			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
1973			break;
1974		}
1975	}
1976
1977	/* as a fallback, try delivering it to the driver to deal with */
1978
1979	if (ret == 0 && hsotg->driver) {
1980		spin_unlock(&hsotg->lock);
1981		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1982		spin_lock(&hsotg->lock);
1983		if (ret < 0)
1984			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1985	}
1986
1987	hsotg->delayed_status = false;
1988	if (ret == USB_GADGET_DELAYED_STATUS)
1989		hsotg->delayed_status = true;
1990
1991	/*
1992	 * the request either cannot be handled or is not formatted correctly,
1993	 * so respond with a STALL for the status stage to indicate failure.
1994	 */
1995
1996	if (ret < 0)
1997		dwc2_hsotg_stall_ep0(hsotg);
1998}
1999
2000/**
2001 * dwc2_hsotg_complete_setup - completion of a setup transfer
2002 * @ep: The endpoint the request was on.
2003 * @req: The request completed.
2004 *
2005 * Called on completion of any requests the driver itself submitted for
2006 * EP0 setup packets
2007 */
2008static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
2009				      struct usb_request *req)
2010{
2011	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
2012	struct dwc2_hsotg *hsotg = hs_ep->parent;
2013
2014	if (req->status < 0) {
2015		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
2016		return;
2017	}
2018
2019	spin_lock(&hsotg->lock);
2020	if (req->actual == 0)
2021		dwc2_hsotg_enqueue_setup(hsotg);
2022	else
2023		dwc2_hsotg_process_control(hsotg, req->buf);
2024	spin_unlock(&hsotg->lock);
2025}
2026
2027/**
2028 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
2029 * @hsotg: The device state.
2030 *
2031 * Enqueue a request on EP0 if necessary to receive any SETUP packets
2032 * sent by the host.
2033 */
2034static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
2035{
2036	struct usb_request *req = hsotg->ctrl_req;
2037	struct dwc2_hsotg_req *hs_req = our_req(req);
2038	int ret;
2039
2040	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
2041
2042	req->zero = 0;
2043	req->length = 8;
2044	req->buf = hsotg->ctrl_buff;
2045	req->complete = dwc2_hsotg_complete_setup;
2046
2047	if (!list_empty(&hs_req->queue)) {
2048		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
2049		return;
2050	}
2051
2052	hsotg->eps_out[0]->dir_in = 0;
2053	hsotg->eps_out[0]->send_zlp = 0;
2054	hsotg->ep0_state = DWC2_EP0_SETUP;
2055
2056	ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
2057	if (ret < 0) {
2058		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
2059		/*
2060		 * Don't think there's much we can do other than watch the
2061		 * driver fail.
2062		 */
2063	}
2064}
2065
2066static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
2067				   struct dwc2_hsotg_ep *hs_ep)
2068{
2069	u32 ctrl;
2070	u8 index = hs_ep->index;
2071	u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
2072	u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
2073
2074	if (hs_ep->dir_in)
2075		dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
2076			index);
2077	else
2078		dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
2079			index);
2080	if (using_desc_dma(hsotg)) {
2081		/* No specific buffer is needed for the ep0 ZLP */
2082		dma_addr_t dma = hs_ep->desc_list_dma;
2083
2084		if (!index)
2085			dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
2086
2087		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
2088	} else {
2089		dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
2090			    DXEPTSIZ_XFERSIZE(0),
2091			    epsiz_reg);
2092	}
2093
2094	ctrl = dwc2_readl(hsotg, epctl_reg);
2095	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
2096	ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
2097	ctrl |= DXEPCTL_USBACTEP;
2098	dwc2_writel(hsotg, ctrl, epctl_reg);
2099}
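/*
 * Illustrative note: in the non-DDMA path above, PKTCNT = 1 with
 * XFERSIZE = 0 programs exactly one zero-byte packet; in DDMA the same
 * effect comes from a single descriptor configured with a transfer
 * length of 0 (the dwc2_gadget_config_nonisoc_xfer_ddma() call).
 */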
2100
2101/**
2102 * dwc2_hsotg_complete_request - complete a request given to us
2103 * @hsotg: The device state.
2104 * @hs_ep: The endpoint the request was on.
2105 * @hs_req: The request to complete.
2106 * @result: The result code (0 => Ok, otherwise errno)
2107 *
2108 * The given request has finished, so call the necessary completion
2109 * if it has one and then look to see if we can start a new request
2110 * on the endpoint.
2111 *
2112 * Note, expects the ep to already be locked as appropriate.
2113 */
2114static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
2115					struct dwc2_hsotg_ep *hs_ep,
2116					struct dwc2_hsotg_req *hs_req,
2117					int result)
2118{
2119	if (!hs_req) {
2120		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
2121		return;
2122	}
2123
2124	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
2125		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2126
2127	/*
2128	 * only replace the status if we've not already set an error
2129	 * from a previous transaction
2130	 */
2131
2132	if (hs_req->req.status == -EINPROGRESS)
2133		hs_req->req.status = result;
2134
2135	if (using_dma(hsotg))
2136		dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2137
2138	dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2139
2140	hs_ep->req = NULL;
2141	list_del_init(&hs_req->queue);
2142
2143	/*
2144	 * call the complete request with the locks off, just in case the
2145	 * request tries to queue more work for this endpoint.
2146	 */
2147
2148	if (hs_req->req.complete) {
2149		spin_unlock(&hsotg->lock);
2150		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
2151		spin_lock(&hsotg->lock);
2152	}
2153
2154	/* In DDMA there is no need to proceed to starting the next ISOC request */
2155	if (using_desc_dma(hsotg) && hs_ep->isochronous)
2156		return;
2157
2158	/*
2159	 * Look to see if there is anything else to do. Note, the completion
2160	 * of the previous request may have caused a new request to be started
2161	 * so be careful when doing this.
2162	 */
2163
2164	if (!hs_ep->req && result >= 0)
2165		dwc2_gadget_start_next_request(hs_ep);
2166}
2167
2168/*
2169 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
2170 * @hs_ep: The endpoint the request was on.
2171 *
2172 * Get the first request from the ep queue and determine the descriptor on
2173 * which completion happened. SW discovers which descriptor HW is currently
2174 * using and computes the completed descriptor's index from the DEPDMA
2175 * register, then updates the request's actual length and gives it back.
2176 */
2177static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
2178{
2179	struct dwc2_hsotg *hsotg = hs_ep->parent;
2180	struct dwc2_hsotg_req *hs_req;
2181	struct usb_request *ureq;
2182	u32 desc_sts;
2183	u32 mask;
2184
2185	desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2186
2187	/* Process only descriptors with buffer status set to DMA done */
2188	while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
2189		DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
2190
2191		hs_req = get_ep_head(hs_ep);
2192		if (!hs_req) {
2193			dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
2194			return;
2195		}
2196		ureq = &hs_req->req;
2197
2198		/* Check completion status */
2199		if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
2200			DEV_DMA_STS_SUCC) {
2201			mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
2202				DEV_DMA_ISOC_RX_NBYTES_MASK;
2203			ureq->actual = ureq->length - ((desc_sts & mask) >>
2204				DEV_DMA_ISOC_NBYTES_SHIFT);
2205
2206			/* Adjust the actual length for ISOC-OUT if the length
2207			 * is not a multiple of 4
2208			 */
2209			if (!hs_ep->dir_in && ureq->length & 0x3)
2210				ureq->actual += 4 - (ureq->length & 0x3);
2211
2212			/* Set actual frame number for completed transfers */
2213			ureq->frame_number =
2214				(desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
2215				DEV_DMA_ISOC_FRNUM_SHIFT;
2216		}
2217
2218		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2219
2220		hs_ep->compl_desc++;
2221		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
2222			hs_ep->compl_desc = 0;
2223		desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2224	}
2225}
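/*
 * Worked example (hypothetical values): for an ISOC-OUT request with
 * ureq->length = 10, DMA works on 4-byte aligned sizes, so NBYTES
 * counts down from 12. If the full 10-byte packet arrives:
 *
 *	remaining    = (desc_sts & mask) >> shift  =  2
 *	ureq->actual = 10 - 2                      =  8
 *	10 & 0x3 == 2, so actual += 4 - 2          -> 10
 */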
2226
2227/*
2228 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
2229 * @hs_ep: The isochronous endpoint.
2230 *
2231 * If the EP is ISOC-OUT, the RX FIFO must be flushed to remove the source
2232 * of the BNA interrupt. Reset the target frame and next_desc so that ISOCs
2233 * can be restarted on the NAK interrupt for the IN direction, or on the
2234 * OUTTKNEPDIS interrupt for the OUT direction.
2235 */
2236static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
2237{
2238	struct dwc2_hsotg *hsotg = hs_ep->parent;
2239
2240	if (!hs_ep->dir_in)
2241		dwc2_flush_rx_fifo(hsotg);
2242	dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
2243
2244	hs_ep->target_frame = TARGET_FRAME_INITIAL;
2245	hs_ep->next_desc = 0;
2246	hs_ep->compl_desc = 0;
2247}
2248
2249/**
2250 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2251 * @hsotg: The device state.
2252 * @ep_idx: The endpoint index for the data
2253 * @size: The size of data in the fifo, in bytes
2254 *
2255 * The FIFO status shows there is data to read from the FIFO for a given
2256 * endpoint, so sort out whether we need to read the data into a request
2257 * that has been made for that endpoint.
2258 */
2259static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
2260{
2261	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
2262	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2263	int to_read;
2264	int max_req;
2265	int read_ptr;
2266
2267	if (!hs_req) {
2268		u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
2269		int ptr;
2270
2271		dev_dbg(hsotg->dev,
2272			"%s: FIFO %d bytes on ep%d but no req (DXEPCTL=0x%08x)\n",
2273			 __func__, size, ep_idx, epctl);
2274
2275		/* dump the data from the FIFO, we've nothing we can do */
2276		for (ptr = 0; ptr < size; ptr += 4)
2277			(void)dwc2_readl(hsotg, EPFIFO(ep_idx));
2278
2279		return;
2280	}
2281
2282	to_read = size;
2283	read_ptr = hs_req->req.actual;
2284	max_req = hs_req->req.length - read_ptr;
2285
2286	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
2287		__func__, to_read, max_req, read_ptr, hs_req->req.length);
2288
2289	if (to_read > max_req) {
2290		/*
2291		 * more data appeared than we were willing
2292		 * to deal with in this request.
2293		 */
2294
2295		/* currently we don't handle this case */
2296		WARN_ON_ONCE(1);
2297	}
2298
2299	hs_ep->total_data += to_read;
2300	hs_req->req.actual += to_read;
2301	to_read = DIV_ROUND_UP(to_read, 4);
2302
2303	/*
2304	 * note, we might overwrite the buffer end by 3 bytes depending on
2305	 * alignment of the data.
2306	 */
2307	dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
2308		       hs_req->req.buf + read_ptr, to_read);
2309}
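/*
 * Illustrative note: the FIFO is drained in 32-bit words, so e.g. a
 * 7-byte packet takes DIV_ROUND_UP(7, 4) = 2 reads and may write up to
 * 3 bytes past the requested length - hence the overwrite warning in
 * the comment above.
 */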
2310
2311/**
2312 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2313 * @hsotg: The device instance
2314 * @dir_in: True if this is an IN ZLP
2315 *
2316 * Generate a zero-length IN packet request for terminating a SETUP
2317 * transaction.
2318 *
2319 * Note, since we don't write any data to the TxFIFO, it is
2320 * currently believed that we do not need to wait for any space in
2321 * the TxFIFO.
2322 */
2323static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
2324{
2325	/* eps_out[0] is used in both directions */
2326	hsotg->eps_out[0]->dir_in = dir_in;
2327	hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
2328
2329	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
2330}
2331
2332/*
2333 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
2334 * @hs_ep - The endpoint on which the transfer took place
2335 *
2336 * Iterate over the endpoint's descriptor chain and count the bytes left in
2337 * its DMA descriptors after the transfer has completed. Used for non-isoc EPs.
2338 */
2339static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
2340{
2341	const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
2342	struct dwc2_hsotg *hsotg = hs_ep->parent;
2343	unsigned int bytes_rem = 0;
2344	unsigned int bytes_rem_correction = 0;
2345	struct dwc2_dma_desc *desc = hs_ep->desc_list;
2346	int i;
2347	u32 status;
2348	u32 mps = hs_ep->ep.maxpacket;
2349	int dir_in = hs_ep->dir_in;
2350
2351	if (!desc)
2352		return -EINVAL;
2353
2354	/* Interrupt OUT EP with an mps that is not a multiple of 4 */
2355	if (hs_ep->index)
2356		if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4))
2357			bytes_rem_correction = 4 - (mps % 4);
2358
2359	for (i = 0; i < hs_ep->desc_count; ++i) {
2360		status = desc->status;
2361		bytes_rem += status & DEV_DMA_NBYTES_MASK;
2362		bytes_rem -= bytes_rem_correction;
2363
2364		if (status & DEV_DMA_STS_MASK)
2365			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
2366				i, status & DEV_DMA_STS_MASK);
2367
2368		if (status & DEV_DMA_L)
2369			break;
2370
2371		desc++;
2372	}
2373
2374	return bytes_rem;
2375}
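/*
 * Worked example (hypothetical values): for an interrupt OUT EP with
 * mps = 10, each descriptor is programmed with a 4-byte aligned size,
 * so every closed descriptor over-reports the remainder by
 * 4 - (10 % 4) = 2 bytes; bytes_rem_correction subtracts those 2 bytes
 * per descriptor to recover the true remaining count.
 */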
2376
2377/**
2378 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
2379 * @hsotg: The device instance
2380 * @epnum: The endpoint received from
2381 *
2382 * The RXFIFO has delivered an OutDone event, which means that the data
2383 * transfer for an OUT endpoint has been completed, either by a short
2384 * packet or by the finish of a transfer.
2385 */
2386static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
2387{
2388	u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
2389	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
2390	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2391	struct usb_request *req = &hs_req->req;
2392	unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2393	int result = 0;
2394
2395	if (!hs_req) {
2396		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
2397		return;
2398	}
2399
2400	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
2401		dev_dbg(hsotg->dev, "zlp packet received\n");
2402		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2403		dwc2_hsotg_enqueue_setup(hsotg);
2404		return;
2405	}
2406
2407	if (using_desc_dma(hsotg))
2408		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2409
2410	if (using_dma(hsotg)) {
2411		unsigned int size_done;
2412
2413		/*
2414		 * Calculate the size of the transfer by checking how much
2415		 * is left in the endpoint size register and then working it
2416		 * out from the amount we loaded for the transfer.
2417		 *
2418		 * We need to do this as DMA pointers are always 32bit aligned
2419		 * so may overshoot/undershoot the transfer.
2420		 */
2421
2422		size_done = hs_ep->size_loaded - size_left;
2423		size_done += hs_ep->last_load;
2424
2425		req->actual = size_done;
2426	}
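	/*
	 * Worked example (hypothetical values): with size_loaded = 512,
	 * last_load = 64 (bytes already transferred when this transfer
	 * was programmed) and 112 bytes reported left in the size
	 * register:
	 *
	 *	size_done = 512 - 112 + 64 = 464  ->  req->actual
	 */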
2427
2428	/* if there is more of the request to transfer, schedule a new transfer */
2429	if (req->actual < req->length && size_left == 0) {
2430		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2431		return;
2432	}
2433
2434	if (req->actual < req->length && req->short_not_ok) {
2435		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
2436			__func__, req->actual, req->length);
2437
2438		/*
2439		 * todo - what should we return here? there's no one else
2440		 * even bothering to check the status.
2441		 */
2442	}
2443
2444	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
2445	if (!using_desc_dma(hsotg) && epnum == 0 &&
2446	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2447		/* Move to STATUS IN */
2448		if (!hsotg->delayed_status)
2449			dwc2_hsotg_ep0_zlp(hsotg, true);
2450	}
2451
2452	/* Set actual frame number for completed transfers */
2453	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
2454		req->frame_number = hs_ep->target_frame;
2455		dwc2_gadget_incr_frame_num(hs_ep);
2456	}
2457
2458	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2459}
2460
2461/**
2462 * dwc2_hsotg_handle_rx - RX FIFO has data
2463 * @hsotg: The device instance
2464 *
2465 * The IRQ handler has detected that the RX FIFO has some data in it
2466 * that requires processing, so find out what is in there and do the
2467 * appropriate read.
2468 *
2469 * The RXFIFO is a true FIFO, the packets coming out are still in packet
2470 * chunks, so if you have x packets received on an endpoint you'll get x
2471 * FIFO events delivered, each with a packet's worth of data in it.
2472 *
2473 * When using DMA, we should not be processing events from the RXFIFO
2474 * as the actual data should be sent to the memory directly and we turn
2475 * on the completion interrupts to get notifications of transfer completion.
2476 */
2477static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
2478{
2479	u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
2480	u32 epnum, status, size;
2481
2482	WARN_ON(using_dma(hsotg));
2483
2484	epnum = grxstsr & GRXSTS_EPNUM_MASK;
2485	status = grxstsr & GRXSTS_PKTSTS_MASK;
2486
2487	size = grxstsr & GRXSTS_BYTECNT_MASK;
2488	size >>= GRXSTS_BYTECNT_SHIFT;
2489
2490	dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
2491		__func__, grxstsr, size, epnum);
2492
2493	switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
2494	case GRXSTS_PKTSTS_GLOBALOUTNAK:
2495		dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
2496		break;
2497
2498	case GRXSTS_PKTSTS_OUTDONE:
2499		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
2500			dwc2_hsotg_read_frameno(hsotg));
2501
2502		if (!using_dma(hsotg))
2503			dwc2_hsotg_handle_outdone(hsotg, epnum);
2504		break;
2505
2506	case GRXSTS_PKTSTS_SETUPDONE:
2507		dev_dbg(hsotg->dev,
2508			"SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
2509			dwc2_hsotg_read_frameno(hsotg),
2510			dwc2_readl(hsotg, DOEPCTL(0)));
2511		/*
2512		 * Call dwc2_hsotg_handle_outdone here if it was not called from
2513		 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
2514		 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
2515		 */
2516		if (hsotg->ep0_state == DWC2_EP0_SETUP)
2517			dwc2_hsotg_handle_outdone(hsotg, epnum);
2518		break;
2519
2520	case GRXSTS_PKTSTS_OUTRX:
2521		dwc2_hsotg_rx_data(hsotg, epnum, size);
2522		break;
2523
2524	case GRXSTS_PKTSTS_SETUPRX:
2525		dev_dbg(hsotg->dev,
2526			"SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
2527			dwc2_hsotg_read_frameno(hsotg),
2528			dwc2_readl(hsotg, DOEPCTL(0)));
2529
2530		WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
2531
2532		dwc2_hsotg_rx_data(hsotg, epnum, size);
2533		break;
2534
2535	default:
2536		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
2537			 __func__, grxstsr);
2538
2539		dwc2_hsotg_dump(hsotg);
2540		break;
2541	}
2542}
2543
2544/**
2545 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
2546 * @mps: The maximum packet size in bytes.
2547 */
2548static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
2549{
2550	switch (mps) {
2551	case 64:
2552		return D0EPCTL_MPS_64;
2553	case 32:
2554		return D0EPCTL_MPS_32;
2555	case 16:
2556		return D0EPCTL_MPS_16;
2557	case 8:
2558		return D0EPCTL_MPS_8;
2559	}
2560
2561	/* bad max packet size, warn and return invalid result */
2562	WARN_ON(1);
2563	return (u32)-1;
2564}
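/*
 * Illustrative note: the value returned above is the 2-bit EP0 MPS
 * field encoding (64 -> 0, 32 -> 1, 16 -> 2, 8 -> 3), which is why
 * dwc2_hsotg_set_ep_maxpacket() below treats any encoded value above
 * 3 as invalid.
 */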
2565
2566/**
2567 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
2568 * @hsotg: The driver state.
2569 * @ep: The index number of the endpoint
2570 * @mps: The maximum packet size in bytes
2571 * @mc: The multicount value
2572 * @dir_in: True if direction is in.
2573 *
2574 * Configure the maximum packet size for the given endpoint, updating
2575 * the hardware control registers to reflect this.
2576 */
2577static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
2578					unsigned int ep, unsigned int mps,
2579					unsigned int mc, unsigned int dir_in)
2580{
2581	struct dwc2_hsotg_ep *hs_ep;
2582	u32 reg;
2583
2584	hs_ep = index_to_ep(hsotg, ep, dir_in);
2585	if (!hs_ep)
2586		return;
2587
2588	if (ep == 0) {
2589		u32 mps_bytes = mps;
2590
2591		/* EP0 is a special case */
2592		mps = dwc2_hsotg_ep0_mps(mps_bytes);
2593		if (mps > 3)
2594			goto bad_mps;
2595		hs_ep->ep.maxpacket = mps_bytes;
2596		hs_ep->mc = 1;
2597	} else {
2598		if (mps > 1024)
2599			goto bad_mps;
2600		hs_ep->mc = mc;
2601		if (mc > 3)
2602			goto bad_mps;
2603		hs_ep->ep.maxpacket = mps;
2604	}
2605
2606	if (dir_in) {
2607		reg = dwc2_readl(hsotg, DIEPCTL(ep));
2608		reg &= ~DXEPCTL_MPS_MASK;
2609		reg |= mps;
2610		dwc2_writel(hsotg, reg, DIEPCTL(ep));
2611	} else {
2612		reg = dwc2_readl(hsotg, DOEPCTL(ep));
2613		reg &= ~DXEPCTL_MPS_MASK;
2614		reg |= mps;
2615		dwc2_writel(hsotg, reg, DOEPCTL(ep));
2616	}
2617
2618	return;
2619
2620bad_mps:
2621	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
2622}
2623
2624/**
2625 * dwc2_hsotg_txfifo_flush - flush Tx FIFO
2626 * @hsotg: The driver state
2627 * @idx: The index for the endpoint (0..15)
2628 */
2629static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
2630{
2631	dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
2632		    GRSTCTL);
2633
2634	/* wait until the fifo is flushed */
2635	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
2636		dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
2637			 __func__);
2638}
2639
2640/**
2641 * dwc2_hsotg_trytx - check to see if anything needs transmitting
2642 * @hsotg: The driver state
2643 * @hs_ep: The driver endpoint to check.
2644 *
2645 * Check to see if there is a request that has data to send, and if so
2646 * make an attempt to write data into the FIFO.
2647 */
2648static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
2649			    struct dwc2_hsotg_ep *hs_ep)
2650{
2651	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2652
2653	if (!hs_ep->dir_in || !hs_req) {
2654		/*
2655		 * if no request is enqueued, we disable interrupts
2656		 * for the endpoint, except for ep0
2657		 */
2658		if (hs_ep->index != 0)
2659			dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
2660					      hs_ep->dir_in, 0);
2661		return 0;
2662	}
2663
2664	if (hs_req->req.actual < hs_req->req.length) {
2665		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
2666			hs_ep->index);
2667		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
2668	}
2669
2670	return 0;
2671}
2672
2673/**
2674 * dwc2_hsotg_complete_in - complete IN transfer
2675 * @hsotg: The device state.
2676 * @hs_ep: The endpoint that has just completed.
2677 *
2678 * An IN transfer has been completed, update the transfer's state and then
2679 * call the relevant completion routines.
2680 */
2681static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
2682				   struct dwc2_hsotg_ep *hs_ep)
2683{
2684	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2685	u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
2686	int size_left, size_done;
2687
2688	if (!hs_req) {
2689		dev_dbg(hsotg->dev, "XferCompl but no req\n");
2690		return;
2691	}
2692
2693	/* Finish ZLP handling for IN EP0 transactions */
2694	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
2695		dev_dbg(hsotg->dev, "zlp packet sent\n");
2696
2697		/*
2698		 * While sending the zlp for DWC2_EP0_STATUS_IN, the EP direction
2699		 * was changed to IN. Change it back to complete the OUT transfer.
2700		 */
2701		hs_ep->dir_in = 0;
2702
2703		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2704		if (hsotg->test_mode) {
2705			int ret;
2706
2707			ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
2708			if (ret < 0) {
2709				dev_dbg(hsotg->dev, "Invalid Test #%d\n",
2710					hsotg->test_mode);
2711				dwc2_hsotg_stall_ep0(hsotg);
2712				return;
2713			}
2714		}
2715		dwc2_hsotg_enqueue_setup(hsotg);
2716		return;
2717	}
2718
2719	/*
2720	 * Calculate the size of the transfer by checking how much is left
2721	 * in the endpoint size register and then working it out from
2722	 * the amount we loaded for the transfer.
2723	 *
2724	 * We do this even for DMA, as the transfer may have incremented
2725	 * past the end of the buffer (DMA transfers are always 32bit
2726	 * aligned).
2727	 */
2728	if (using_desc_dma(hsotg)) {
2729		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2730		if (size_left < 0)
2731			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
2732				size_left);
2733	} else {
2734		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2735	}
2736
2737	size_done = hs_ep->size_loaded - size_left;
2738	size_done += hs_ep->last_load;
2739
2740	if (hs_req->req.actual != size_done)
2741		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
2742			__func__, hs_req->req.actual, size_done);
2743
2744	hs_req->req.actual = size_done;
2745	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
2746		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2747
2748	if (!size_left && hs_req->req.actual < hs_req->req.length) {
2749		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
2750		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2751		return;
2752	}
2753
2754	/* ZLP for all endpoints in non-DDMA; for ep0 only in the DATA IN stage */
2755	if (hs_ep->send_zlp) {
2756		hs_ep->send_zlp = 0;
2757		if (!using_desc_dma(hsotg)) {
2758			dwc2_hsotg_program_zlp(hsotg, hs_ep);
2759			/* transfer will be completed on next complete interrupt */
2760			return;
2761		}
2762	}
2763
2764	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
2765		/* Move to STATUS OUT */
2766		dwc2_hsotg_ep0_zlp(hsotg, false);
2767		return;
2768	}
2769
2770	/* Set actual frame number for completed transfers */
2771	if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
2772		hs_req->req.frame_number = hs_ep->target_frame;
2773		dwc2_gadget_incr_frame_num(hs_ep);
2774	}
2775
2776	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2777}
2778
2779/**
2780 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
2781 * @hsotg: The device state.
2782 * @idx: Index of ep.
2783 * @dir_in: Endpoint direction 1-in 0-out.
2784 *
2785 * Reads the interrupts for the endpoint with the given index and
2786 * direction, masking epint_reg with the corresponding mask.
2787 */
2788static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
2789					  unsigned int idx, int dir_in)
2790{
2791	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
2792	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2793	u32 ints;
2794	u32 mask;
2795	u32 diepempmsk;
2796
2797	mask = dwc2_readl(hsotg, epmsk_reg);
2798	diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
2799	mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
2800	mask |= DXEPINT_SETUP_RCVD;
2801
2802	ints = dwc2_readl(hsotg, epint_reg);
2803	ints &= mask;
2804	return ints;
2805}
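/*
 * Illustrative note: DIEPEMPMSK carries one TxFIFO-empty enable bit
 * per IN endpoint, so for e.g. idx = 2 the expression
 * "(diepempmsk >> 2) & 0x1" decides whether DIEPMSK_TXFIFOEMPTY is
 * folded into the mask applied to DIEPINT(2).
 */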
2806
2807/**
2808 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
2809 * @hs_ep: The endpoint on which interrupt is asserted.
2810 *
2811 * This interrupt indicates that the endpoint has been disabled per the
2812 * application's request.
2813 *
2814 * For IN endpoints this flushes the txfifo; for BULK it clears the global
2815 * IN NAK via DCTL_CGNPINNAK, and for ISOC it completes the current request.
2816 *
2817 * For ISOC-OUT endpoints it completes expired requests; if a request
2818 * remains, it is started.
2819 */
2820static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2821{
2822	struct dwc2_hsotg *hsotg = hs_ep->parent;
2823	struct dwc2_hsotg_req *hs_req;
2824	unsigned char idx = hs_ep->index;
2825	int dir_in = hs_ep->dir_in;
2826	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
2827	int dctl = dwc2_readl(hsotg, DCTL);
2828
2829	dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
2830
2831	if (dir_in) {
2832		int epctl = dwc2_readl(hsotg, epctl_reg);
2833
2834		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
2835
2836		if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
2837			int dctl = dwc2_readl(hsotg, DCTL);
2838
2839			dctl |= DCTL_CGNPINNAK;
2840			dwc2_writel(hsotg, dctl, DCTL);
2841		}
2842	} else {
2843
2844		if (dctl & DCTL_GOUTNAKSTS) {
2845			dctl |= DCTL_CGOUTNAK;
2846			dwc2_writel(hsotg, dctl, DCTL);
2847		}
2848	}
2849
2850	if (!hs_ep->isochronous)
2851		return;
2852
2853	if (list_empty(&hs_ep->queue)) {
2854		dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
2855			__func__, hs_ep);
2856		return;
2857	}
2858
2859	do {
2860		hs_req = get_ep_head(hs_ep);
2861		if (hs_req) {
2862			hs_req->req.frame_number = hs_ep->target_frame;
2863			hs_req->req.actual = 0;
2864			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2865						    -ENODATA);
2866		}
2867		dwc2_gadget_incr_frame_num(hs_ep);
2868		/* Update current frame number value. */
2869		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2870	} while (dwc2_gadget_target_frame_elapsed(hs_ep));
2871}
2872
2873/**
2874 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
2875 * @ep: The endpoint on which interrupt is asserted.
2876 *
2877 * This is the starting point for an ISOC-OUT transfer; synchronization is
2878 * done with the first OUT token received from the host while the EP is disabled.
2879 *
2880 * The device does not know the initial frame in which the OUT token will come,
2881 * so the HW generates OUTTKNEPDIS - an OUT token received while the EP is
2882 * disabled. Upon getting this interrupt, SW calculates the next transfer frame.
2883 */
2884static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
2885{
2886	struct dwc2_hsotg *hsotg = ep->parent;
2887	struct dwc2_hsotg_req *hs_req;
2888	int dir_in = ep->dir_in;
2889
2890	if (dir_in || !ep->isochronous)
2891		return;
2892
2893	if (using_desc_dma(hsotg)) {
2894		if (ep->target_frame == TARGET_FRAME_INITIAL) {
2895			/* Start first ISO Out */
2896			ep->target_frame = hsotg->frame_number;
2897			dwc2_gadget_start_isoc_ddma(ep);
2898		}
2899		return;
2900	}
2901
2902	if (ep->target_frame == TARGET_FRAME_INITIAL) {
2903		u32 ctrl;
2904
2905		ep->target_frame = hsotg->frame_number;
2906		if (ep->interval > 1) {
2907			ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
2908			if (ep->target_frame & 0x1)
2909				ctrl |= DXEPCTL_SETODDFR;
2910			else
2911				ctrl |= DXEPCTL_SETEVENFR;
2912
2913			dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
2914		}
2915	}
2916
2917	while (dwc2_gadget_target_frame_elapsed(ep)) {
2918		hs_req = get_ep_head(ep);
2919		if (hs_req) {
2920			hs_req->req.frame_number = ep->target_frame;
2921			hs_req->req.actual = 0;
2922			dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2923		}
2924
2925		dwc2_gadget_incr_frame_num(ep);
2926		/* Update current frame number value. */
2927		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2928	}
2929
2930	if (!ep->req)
2931		dwc2_gadget_start_next_request(ep);
2932
2933}
2934
2935static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
2936				   struct dwc2_hsotg_ep *hs_ep);
2937
2938/**
2939 * dwc2_gadget_handle_nak - handle NAK interrupt
2940 * @hs_ep: The endpoint on which interrupt is asserted.
2941 *
2942 * This is the starting point for an ISOC-IN transfer; synchronization is
2943 * done with the first IN token received from the host while the EP is disabled.
2944 *
2945 * The device does not know when the first IN token will arrive from the host.
2946 * On its arrival the HW generates two interrupts: 'IN token received while
2947 * FIFO empty' and 'NAK'. For ISOC-IN, the NAK interrupt means the token has
2948 * arrived and a ZLP was sent in response, as there was no data in the FIFO.
2949 * SW uses this interrupt to obtain the frame in which the token arrived and
2950 * then, based on the interval, calculates the next frame for the transfer.
2951 */
2952static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
2953{
2954	struct dwc2_hsotg *hsotg = hs_ep->parent;
2955	struct dwc2_hsotg_req *hs_req;
2956	int dir_in = hs_ep->dir_in;
2957	u32 ctrl;
2958
2959	if (!dir_in || !hs_ep->isochronous)
2960		return;
2961
2962	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
2963
2964		if (using_desc_dma(hsotg)) {
2965			hs_ep->target_frame = hsotg->frame_number;
2966			dwc2_gadget_incr_frame_num(hs_ep);
2967
2968			/* In service interval mode target_frame must
2969			 * be set to last (u)frame of the service interval.
2970			 */
2971			if (hsotg->params.service_interval) {
2972				/* Set target_frame to the first (u)frame of
2973				 * the service interval
2974				 */
2975				hs_ep->target_frame &= ~hs_ep->interval + 1;
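				/*
				 * Illustrative note (hypothetical values):
				 * since "~hs_ep->interval + 1" equals
				 * -interval, the AND above rounds target_frame
				 * down to a multiple of the interval, e.g.
				 * interval = 8, frame 0x123 -> 0x120; the
				 * incr/dec pair below then yields 0x127.
				 */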
2976
2977				/* Set target_frame to the last (u)frame of
2978				 * the service interval
2979				 */
2980				dwc2_gadget_incr_frame_num(hs_ep);
2981				dwc2_gadget_dec_frame_num_by_one(hs_ep);
2982			}
2983
2984			dwc2_gadget_start_isoc_ddma(hs_ep);
2985			return;
2986		}
2987
2988		hs_ep->target_frame = hsotg->frame_number;
2989		if (hs_ep->interval > 1) {
2990			u32 ctrl = dwc2_readl(hsotg,
2991					      DIEPCTL(hs_ep->index));
2992			if (hs_ep->target_frame & 0x1)
2993				ctrl |= DXEPCTL_SETODDFR;
2994			else
2995				ctrl |= DXEPCTL_SETEVENFR;
2996
2997			dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
2998		}
2999	}
3000
3001	if (using_desc_dma(hsotg))
3002		return;
3003
3004	ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
3005	if (ctrl & DXEPCTL_EPENA)
3006		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
3007	else
3008		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
3009
3010	while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
3011		hs_req = get_ep_head(hs_ep);
3012		if (hs_req) {
3013			hs_req->req.frame_number = hs_ep->target_frame;
3014			hs_req->req.actual = 0;
3015			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
3016		}
3017
3018		dwc2_gadget_incr_frame_num(hs_ep);
3019		/* Update current frame number value. */
3020		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
3021	}
3022
3023	if (!hs_ep->req)
3024		dwc2_gadget_start_next_request(hs_ep);
3025}
3026
3027/**
3028 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
3029 * @hsotg: The driver state
3030 * @idx: The index for the endpoint (0..15)
3031 * @dir_in: Set if this is an IN endpoint
3032 *
3033 * Process and clear any interrupt pending for an individual endpoint
3034 */
3035static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
3036			     int dir_in)
3037{
3038	struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
3039	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
3040	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
3041	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
3042	u32 ints;
3043
3044	ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
3045
3046	/* Clear endpoint interrupts */
3047	dwc2_writel(hsotg, ints, epint_reg);
3048
3049	if (!hs_ep) {
3050		dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
3051			__func__, idx, dir_in ? "in" : "out");
3052		return;
3053	}
3054
3055	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
3056		__func__, idx, dir_in ? "in" : "out", ints);
3057
3058	/* Don't process XferCompl interrupt if it is a setup packet */
3059	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
3060		ints &= ~DXEPINT_XFERCOMPL;
3061
3062	/*
3063	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
3064	 * stage and xfercomplete was generated without SETUP phase done
3065	 * interrupt. SW should parse the received setup packet only after the
3066	 * host exits the setup phase of the control transfer.
3067	 */
3068	if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
3069	    hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
3070		ints &= ~DXEPINT_XFERCOMPL;
3071
3072	if (ints & DXEPINT_XFERCOMPL) {
3073		dev_dbg(hsotg->dev,
3074			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
3075			__func__, dwc2_readl(hsotg, epctl_reg),
3076			dwc2_readl(hsotg, epsiz_reg));
3077
3078		/* In DDMA handle isochronous requests separately */
3079		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
3080			dwc2_gadget_complete_isoc_request_ddma(hs_ep);
3081		} else if (dir_in) {
3082			/*
3083			 * We get OutDone from the FIFO, so we only
3084			 * need to look at completing IN requests here
3085			 * if operating slave mode
3086			 */
3087			if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
3088				dwc2_hsotg_complete_in(hsotg, hs_ep);
3089
3090			if (idx == 0 && !hs_ep->req)
3091				dwc2_hsotg_enqueue_setup(hsotg);
3092		} else if (using_dma(hsotg)) {
3093			/*
3094			 * We're using DMA, we need to fire an OutDone here
3095			 * as we ignore the RXFIFO.
3096			 */
3097			if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
3098				dwc2_hsotg_handle_outdone(hsotg, idx);
3099		}
3100	}
3101
3102	if (ints & DXEPINT_EPDISBLD)
3103		dwc2_gadget_handle_ep_disabled(hs_ep);
3104
3105	if (ints & DXEPINT_OUTTKNEPDIS)
3106		dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
3107
3108	if (ints & DXEPINT_NAKINTRPT)
3109		dwc2_gadget_handle_nak(hs_ep);
3110
3111	if (ints & DXEPINT_AHBERR)
3112		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
3113
3114	if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
3115		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
3116
3117		if (using_dma(hsotg) && idx == 0) {
3118			/*
3119			 * this is the notification we've received a
3120			 * setup packet. In non-DMA mode we'd get this
3121			 * from the RXFIFO, instead we need to process
3122			 * the setup here.
3123			 */
3124
3125			if (dir_in)
3126				WARN_ON_ONCE(1);
3127			else
3128				dwc2_hsotg_handle_outdone(hsotg, 0);
3129		}
3130	}
3131
3132	if (ints & DXEPINT_STSPHSERCVD) {
3133		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
3134
3135		/* Safety check EP0 state when STSPHSERCVD asserted */
3136		if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
3137			/* Move to STATUS IN for DDMA */
3138			if (using_desc_dma(hsotg)) {
3139				if (!hsotg->delayed_status)
3140					dwc2_hsotg_ep0_zlp(hsotg, true);
3141				else
3142				/* In the case of a 3-stage Control Write with
3143				 * delayed status, when the Status IN transfer
3144				 * started before STSPHSERCVD was asserted, the
3145				 * NAKSTS bit was not cleared by CNAK in
3146				 * dwc2_hsotg_start_req(). Clear NAKSTS now to
3147				 * allow the transfer to complete.
3148				 */
3149					dwc2_set_bit(hsotg, DIEPCTL(0),
3150						     DXEPCTL_CNAK);
3151			}
3152		}
3153
3154	}
3155
3156	if (ints & DXEPINT_BACK2BACKSETUP)
3157		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
3158
3159	if (ints & DXEPINT_BNAINTR) {
3160		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
3161		if (hs_ep->isochronous)
3162			dwc2_gadget_handle_isoc_bna(hs_ep);
3163	}
3164
3165	if (dir_in && !hs_ep->isochronous) {
3166		/* not sure if this is important, but we'll clear it anyway */
3167		if (ints & DXEPINT_INTKNTXFEMP) {
3168			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
3169				__func__, idx);
3170		}
3171
3172		/* this probably means something bad is happening */
3173		if (ints & DXEPINT_INTKNEPMIS) {
3174			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
3175				 __func__, idx);
3176		}
3177
3178		/* FIFO has space or is empty (see GAHBCFG) */
3179		if (hsotg->dedicated_fifos &&
3180		    ints & DXEPINT_TXFEMP) {
3181			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
3182				__func__, idx);
3183			if (!using_dma(hsotg))
3184				dwc2_hsotg_trytx(hsotg, hs_ep);
3185		}
3186	}
3187}
3188
3189/**
3190 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
3191 * @hsotg: The device state.
3192 *
3193 * Handle updating the device settings after the enumeration phase has
3194 * been completed.
3195 */
3196static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
3197{
3198	u32 dsts = dwc2_readl(hsotg, DSTS);
3199	int ep0_mps = 0, ep_mps = 8;
3200
3201	/*
3202	 * This should signal the finish of the enumeration phase
3203	 * of the USB handshaking, so we should now know what rate
3204	 * we connected at.
3205	 */
3206
3207	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
3208
3209	/*
3210	 * note, since we're limited by the size of transfer on EP0, and
3211	 * it seems IN transfers must be an even number of packets, we do
3212	 * not advertise a 64byte MPS on EP0.
3213	 */
3214
3215	/* catch both EnumSpd_FS and EnumSpd_FS48 */
3216	switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
3217	case DSTS_ENUMSPD_FS:
3218	case DSTS_ENUMSPD_FS48:
3219		hsotg->gadget.speed = USB_SPEED_FULL;
3220		ep0_mps = EP0_MPS_LIMIT;
3221		ep_mps = 1023;
3222		break;
3223
3224	case DSTS_ENUMSPD_HS:
3225		hsotg->gadget.speed = USB_SPEED_HIGH;
3226		ep0_mps = EP0_MPS_LIMIT;
3227		ep_mps = 1024;
3228		break;
3229
3230	case DSTS_ENUMSPD_LS:
3231		hsotg->gadget.speed = USB_SPEED_LOW;
3232		ep0_mps = 8;
3233		ep_mps = 8;
3234		/*
3235		 * note, we don't actually support LS in this driver at the
3236		 * moment, and the documentation seems to imply that it isn't
3237		 * supported by the PHYs on some of the devices.
3238		 */
3239		break;
3240	}
3241	dev_info(hsotg->dev, "new device is %s\n",
3242		 usb_speed_string(hsotg->gadget.speed));
3243
3244	/*
3245	 * we should now know the maximum packet size for an
3246	 * endpoint, so set the endpoints to a default value.
3247	 */
3248
3249	if (ep0_mps) {
3250		int i;
3251		/* Initialize ep0 for both in and out directions */
3252		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
3253		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
3254		for (i = 1; i < hsotg->num_of_eps; i++) {
3255			if (hsotg->eps_in[i])
3256				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3257							    0, 1);
3258			if (hsotg->eps_out[i])
3259				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3260							    0, 0);
3261		}
3262	}
3263
3264	/* ensure after enumeration our EP0 is active */
3265
3266	dwc2_hsotg_enqueue_setup(hsotg);
3267
3268	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3269		dwc2_readl(hsotg, DIEPCTL0),
3270		dwc2_readl(hsotg, DOEPCTL0));
3271}
3272
3273/**
3274 * kill_all_requests - remove all requests from the endpoint's queue
3275 * @hsotg: The device state.
3276 * @ep: The endpoint the requests may be on.
3277 * @result: The result code to use.
3278 *
3279 * Go through the requests on the given endpoint and mark them
3280 * completed with the given result code.
3281 */
3282static void kill_all_requests(struct dwc2_hsotg *hsotg,
3283			      struct dwc2_hsotg_ep *ep,
3284			      int result)
3285{
3286	unsigned int size;
3287
3288	ep->req = NULL;
3289
3290	while (!list_empty(&ep->queue)) {
3291		struct dwc2_hsotg_req *req = get_ep_head(ep);
3292
3293		dwc2_hsotg_complete_request(hsotg, ep, req, result);
3294	}
3295
3296	if (!hsotg->dedicated_fifos)
3297		return;
3298	size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
3299	if (size < ep->fifo_size)
3300		dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3301}
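/*
 * Illustrative note: DTXFSTS reports free TxFIFO space in 32-bit words,
 * hence the "* 4" above. E.g. a hypothetical reading of 0x100 words
 * means 1024 bytes free; the FIFO is flushed only when this is less
 * than the endpoint's configured fifo_size.
 */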
3302
3303/**
3304 * dwc2_hsotg_disconnect - disconnect service
3305 * @hsotg: The device state.
3306 *
3307 * The device has been disconnected. Remove all current
3308 * transactions and signal the gadget driver that this
3309 * has happened.
3310 */
3311void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3312{
3313	unsigned int ep;
3314
3315	if (!hsotg->connected)
3316		return;
3317
3318	hsotg->connected = 0;
3319	hsotg->test_mode = 0;
3320
3321	/* all endpoints should be shutdown */
3322	for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3323		if (hsotg->eps_in[ep])
3324			kill_all_requests(hsotg, hsotg->eps_in[ep],
3325					  -ESHUTDOWN);
3326		if (hsotg->eps_out[ep])
3327			kill_all_requests(hsotg, hsotg->eps_out[ep],
3328					  -ESHUTDOWN);
3329	}
3330
3331	call_gadget(hsotg, disconnect);
3332	hsotg->lx_state = DWC2_L3;
3333
3334	usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
3335}
3336
3337/**
3338 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3339 * @hsotg: The device state.
3340 * @periodic: True if this is a periodic FIFO interrupt
3341 */
3342static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3343{
3344	struct dwc2_hsotg_ep *ep;
3345	int epno, ret;
3346
3347	/* look through for any more data to transmit */
3348	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3349		ep = index_to_ep(hsotg, epno, 1);
3350
3351		if (!ep)
3352			continue;
3353
3354		if (!ep->dir_in)
3355			continue;
3356
3357		if ((periodic && !ep->periodic) ||
3358		    (!periodic && ep->periodic))
3359			continue;
3360
3361		ret = dwc2_hsotg_trytx(hsotg, ep);
3362		if (ret < 0)
3363			break;
3364	}
3365}
3366
3367/* IRQ flags which will trigger a retry around the IRQ loop */
3368#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
3369			GINTSTS_PTXFEMP |  \
3370			GINTSTS_RXFLVL)
3371
3372static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
3373/**
3374 * dwc2_hsotg_core_init_disconnected - issue softreset to the core
3375 * @hsotg: The device state
3376 * @is_usb_reset: USB reset flag
3377 *
3378 * Issue a soft reset to the core, and await the core finishing it.
3379 */
3380void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3381				       bool is_usb_reset)
3382{
3383	u32 intmsk;
3384	u32 val;
3385	u32 usbcfg;
3386	u32 dcfg = 0;
3387	int ep;
3388
3389	/* Kill any ep0 requests as controller will be reinitialized */
3390	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3391
3392	if (!is_usb_reset) {
3393		if (dwc2_core_reset(hsotg, true))
3394			return;
3395	} else {
3396		/* all endpoints should be shutdown */
3397		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
3398			if (hsotg->eps_in[ep])
3399				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3400			if (hsotg->eps_out[ep])
3401				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3402		}
3403	}
3404
3405	/*
3406	 * we must now enable ep0 ready for host detection and then
3407	 * set configuration.
3408	 */
3409
3410	/* keep other bits untouched (so e.g. forced modes are not lost) */
3411	usbcfg = dwc2_readl(hsotg, GUSBCFG);
3412	usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
3413	usbcfg |= GUSBCFG_TOUTCAL(7);
3414
3415	/* remove the HNP/SRP and set the PHY */
3416	usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
3417	dwc2_writel(hsotg, usbcfg, GUSBCFG);
3418
3419	dwc2_phy_init(hsotg, true);
3420
3421	dwc2_hsotg_init_fifo(hsotg);
3422
3423	if (!is_usb_reset)
3424		dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
3425
3426	dcfg |= DCFG_EPMISCNT(1);
3427
3428	switch (hsotg->params.speed) {
3429	case DWC2_SPEED_PARAM_LOW:
3430		dcfg |= DCFG_DEVSPD_LS;
3431		break;
3432	case DWC2_SPEED_PARAM_FULL:
3433		if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
3434			dcfg |= DCFG_DEVSPD_FS48;
3435		else
3436			dcfg |= DCFG_DEVSPD_FS;
3437		break;
3438	default:
3439		dcfg |= DCFG_DEVSPD_HS;
3440	}
3441
3442	if (hsotg->params.ipg_isoc_en)
3443		dcfg |= DCFG_IPG_ISOC_SUPPORDED;
3444
3445	dwc2_writel(hsotg, dcfg,  DCFG);
3446
3447	/* Clear any pending OTG interrupts */
3448	dwc2_writel(hsotg, 0xffffffff, GOTGINT);
3449
3450	/* Clear any pending interrupts */
3451	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
3452	intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
3453		GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
3454		GINTSTS_USBRST | GINTSTS_RESETDET |
3455		GINTSTS_ENUMDONE | GINTSTS_OTGINT |
3456		GINTSTS_USBSUSP | GINTSTS_WKUPINT |
3457		GINTSTS_LPMTRANRCVD;
3458
3459	if (!using_desc_dma(hsotg))
3460		intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
3461
3462	if (!hsotg->params.external_id_pin_ctl)
3463		intmsk |= GINTSTS_CONIDSTSCHNG;
3464
3465	dwc2_writel(hsotg, intmsk, GINTMSK);
3466
3467	if (using_dma(hsotg)) {
3468		dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
3469			    hsotg->params.ahbcfg,
3470			    GAHBCFG);
3471
3472		/* Set DDMA mode support in the core if needed */
3473		if (using_desc_dma(hsotg))
3474			dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
3475
3476	} else {
3477		dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
3478						(GAHBCFG_NP_TXF_EMP_LVL |
3479						 GAHBCFG_P_TXF_EMP_LVL) : 0) |
3480			    GAHBCFG_GLBL_INTR_EN, GAHBCFG);
3481	}
3482
3483	/*
3484	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
3485	 * when we have no data to transfer. Otherwise we get flooded by
3486	 * interrupts.
3487	 */
3488
3489	dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
3490		DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
3491		DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
3492		DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
3493		DIEPMSK);
3494
3495	/*
3496	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
3497	 * DMA mode we may need this and StsPhseRcvd.
3498	 */
3499	dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
3500		DOEPMSK_STSPHSERCVDMSK) : 0) |
3501		DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
3502		DOEPMSK_SETUPMSK,
3503		DOEPMSK);
3504
3505	/* Enable BNA interrupt for DDMA */
3506	if (using_desc_dma(hsotg)) {
3507		dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
3508		dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
3509	}
3510
3511	/* Enable Service Interval mode if supported */
3512	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3513		dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
3514
3515	dwc2_writel(hsotg, 0, DAINTMSK);
3516
3517	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3518		dwc2_readl(hsotg, DIEPCTL0),
3519		dwc2_readl(hsotg, DOEPCTL0));
3520
3521	/* enable in and out endpoint interrupts */
3522	dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
3523
3524	/*
3525	 * Enable the RXFIFO when in slave mode, as this is how we collect
3526	 * the data. In DMA mode, we get events from the FIFO but also
3527	 * things we cannot process, so do not use it.
3528	 */
3529	if (!using_dma(hsotg))
3530		dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
3531
3532	/* Enable interrupts for EP0 in and out */
3533	dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
3534	dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
3535
3536	if (!is_usb_reset) {
3537		dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
3538		udelay(10);  /* see openiboot */
3539		dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
3540	}
3541
3542	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
3543
3544	/*
3545	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
3546	 * writing to the EPCTL register..
3547	 */
3548
3549	/* set to read 1 8byte packet */
3550	dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
3551	       DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
3552
3553	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3554	       DXEPCTL_CNAK | DXEPCTL_EPENA |
3555	       DXEPCTL_USBACTEP,
3556	       DOEPCTL0);
3557
3558	/* enable, but don't activate EP0in */
3559	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3560	       DXEPCTL_USBACTEP, DIEPCTL0);
3561
3562	/* clear global NAKs */
3563	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
3564	if (!is_usb_reset)
3565		val |= DCTL_SFTDISCON;
3566	dwc2_set_bit(hsotg, DCTL, val);
3567
3568	/* configure the core to support LPM */
3569	dwc2_gadget_init_lpm(hsotg);
3570
3571	/* program GREFCLK register if needed */
3572	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3573		dwc2_gadget_program_ref_clk(hsotg);
3574
3575	/* must be at least 3ms to allow the bus to see the disconnect */
3576	mdelay(3);
3577
3578	hsotg->lx_state = DWC2_L0;
3579
3580	dwc2_hsotg_enqueue_setup(hsotg);
3581
3582	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3583		dwc2_readl(hsotg, DIEPCTL0),
3584		dwc2_readl(hsotg, DOEPCTL0));
3585}
3586
3587void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
3588{
3589	/* set the soft-disconnect bit */
3590	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
3591}
3592
3593void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
3594{
3595	/* remove the soft-disconnect and let's go */
3596	if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
3597		dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
3598}
3599
3600/**
3601 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
3602 * @hsotg: The device state.
3603 *
3604 * This interrupt indicates one of the following conditions occurred while
3605 * transmitting an ISOC transaction.
3606 * - Corrupted IN Token for ISOC EP.
3607 * - Packet not complete in FIFO.
3608 *
3609 * The following actions will be taken:
3610 * - Determine the EP
3611 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
3612 */
3613static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3614{
3615	struct dwc2_hsotg_ep *hs_ep;
3616	u32 epctrl;
3617	u32 daintmsk;
3618	u32 idx;
3619
3620	dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
3621
3622	daintmsk = dwc2_readl(hsotg, DAINTMSK);
3623
3624	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3625		hs_ep = hsotg->eps_in[idx];
3626		/* Process only unmasked ISOC EPs */
3627		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3628			continue;
3629
3630		epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
3631		if ((epctrl & DXEPCTL_EPENA) &&
3632		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3633			epctrl |= DXEPCTL_SNAK;
3634			epctrl |= DXEPCTL_EPDIS;
3635			dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
3636		}
3637	}
3638
3639	/* Clear interrupt */
3640	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
3641}
3642
3643/**
3644 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
3645 * @hsotg: The device state.
3646 *
3647 * This interrupt indicates one of the following conditions occurred while
3648 * transmitting an ISOC transaction.
3649 * - Corrupted OUT Token for ISOC EP.
3650 * - Packet not complete in FIFO.
3651 *
3652 * The following actions will be taken:
3653 * - Determine the EP
3654 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
3655 */
3656static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3657{
3658	u32 gintsts;
3659	u32 gintmsk;
3660	u32 daintmsk;
3661	u32 epctrl;
3662	struct dwc2_hsotg_ep *hs_ep;
3663	int idx;
3664
3665	dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
3666
3667	daintmsk = dwc2_readl(hsotg, DAINTMSK);
3668	daintmsk >>= DAINT_OUTEP_SHIFT;
3669
3670	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3671		hs_ep = hsotg->eps_out[idx];
3672		/* Proceed only unmasked ISOC EPs */
3673		/* Process only unmasked ISOC EPs */
3674			continue;
3675
3676		epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
3677		if ((epctrl & DXEPCTL_EPENA) &&
3678		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3679			/* Unmask GOUTNAKEFF interrupt */
3680			gintmsk = dwc2_readl(hsotg, GINTMSK);
3681			gintmsk |= GINTSTS_GOUTNAKEFF;
3682			dwc2_writel(hsotg, gintmsk, GINTMSK);
3683
3684			gintsts = dwc2_readl(hsotg, GINTSTS);
3685			if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
3686				dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
3687				break;
3688			}
3689		}
3690	}
3691
3692	/* Clear interrupt */
3693	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
3694}
3695
3696/**
3697 * dwc2_hsotg_irq - handle device interrupt
3698 * @irq: The IRQ number triggered
3699 * @pw: The private data passed when the handler was registered.
3700 */
3701static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
3702{
3703	struct dwc2_hsotg *hsotg = pw;
3704	int retry_count = 8;
3705	u32 gintsts;
3706	u32 gintmsk;
3707
3708	if (!dwc2_is_device_mode(hsotg))
3709		return IRQ_NONE;
3710
3711	spin_lock(&hsotg->lock);
3712irq_retry:
3713	gintsts = dwc2_readl(hsotg, GINTSTS);
3714	gintmsk = dwc2_readl(hsotg, GINTMSK);
3715
3716	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
3717		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
3718
3719	gintsts &= gintmsk;
3720
3721	if (gintsts & GINTSTS_RESETDET) {
3722		dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
3723
3724		dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
3725
3726		/* This event must be used only if controller is suspended */
3727		if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
3728			dwc2_exit_partial_power_down(hsotg, 0, true);
3729
3730		hsotg->lx_state = DWC2_L0;
3731	}
3732
3733	if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
3734		u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
3735		u32 connected = hsotg->connected;
3736
3737		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
3738		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
3739			dwc2_readl(hsotg, GNPTXSTS));
3740
3741		dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
3742
3743		/* Report disconnection if it is not already done. */
3744		dwc2_hsotg_disconnect(hsotg);
3745
3746		/* Reset device address to zero */
3747		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
3748
3749		if (usb_status & GOTGCTL_BSESVLD && connected)
3750			dwc2_hsotg_core_init_disconnected(hsotg, true);
3751	}
3752
3753	if (gintsts & GINTSTS_ENUMDONE) {
3754		dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
3755
3756		dwc2_hsotg_irq_enumdone(hsotg);
3757	}
3758
3759	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
3760		u32 daint = dwc2_readl(hsotg, DAINT);
3761		u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
3762		u32 daint_out, daint_in;
3763		int ep;
3764
3765		daint &= daintmsk;
3766		daint_out = daint >> DAINT_OUTEP_SHIFT;
3767		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
3768
3769		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
3770
3771		for (ep = 0; ep < hsotg->num_of_eps && daint_out;
3772						ep++, daint_out >>= 1) {
3773			if (daint_out & 1)
3774				dwc2_hsotg_epint(hsotg, ep, 0);
3775		}
3776
3777		for (ep = 0; ep < hsotg->num_of_eps  && daint_in;
3778						ep++, daint_in >>= 1) {
3779			if (daint_in & 1)
3780				dwc2_hsotg_epint(hsotg, ep, 1);
3781		}
3782	}
3783
3784	/* check both FIFOs */
3785
3786	if (gintsts & GINTSTS_NPTXFEMP) {
3787		dev_dbg(hsotg->dev, "NPTxFEmp\n");
3788
3789		/*
3790		 * Disable the interrupt to stop it happening again
3791		 * unless one of these endpoint routines decides that
3792		 * it needs re-enabling
3793		 */
3794
3795		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
3796		dwc2_hsotg_irq_fifoempty(hsotg, false);
3797	}
3798
3799	if (gintsts & GINTSTS_PTXFEMP) {
3800		dev_dbg(hsotg->dev, "PTxFEmp\n");
3801
3802		/* See note in GINTSTS_NPTxFEmp */
3803
3804		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
3805		dwc2_hsotg_irq_fifoempty(hsotg, true);
3806	}
3807
3808	if (gintsts & GINTSTS_RXFLVL) {
3809		/*
3810		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
3811		 * we need to retry dwc2_hsotg_handle_rx if this is still
3812		 * set.
3813		 */
3814
3815		dwc2_hsotg_handle_rx(hsotg);
3816	}
3817
3818	if (gintsts & GINTSTS_ERLYSUSP) {
3819		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
3820		dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
3821	}
3822
3823	/*
3824	 * These next two seem to crop up occasionally, causing the core
3825	 * to shut down the USB transfer, so try clearing them and logging
3826	 * the occurrence.
3827	 */
3828
3829	if (gintsts & GINTSTS_GOUTNAKEFF) {
3830		u8 idx;
3831		u32 epctrl;
3832		u32 gintmsk;
3833		u32 daintmsk;
3834		struct dwc2_hsotg_ep *hs_ep;
3835
3836		daintmsk = dwc2_readl(hsotg, DAINTMSK);
3837		daintmsk >>= DAINT_OUTEP_SHIFT;
3838		/* Mask this interrupt */
3839		gintmsk = dwc2_readl(hsotg, GINTMSK);
3840		gintmsk &= ~GINTSTS_GOUTNAKEFF;
3841		dwc2_writel(hsotg, gintmsk, GINTMSK);
3842
3843		dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
3844		for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3845			hs_ep = hsotg->eps_out[idx];
3846			/* Proceed only for unmasked EPs */
3847			if (BIT(idx) & ~daintmsk)
3848				continue;
3849
3850			epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
3851
3852			/* ISOC EPs only */
3853			if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
3854				epctrl |= DXEPCTL_SNAK;
3855				epctrl |= DXEPCTL_EPDIS;
3856				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
3857				continue;
3858			}
3859
3860			/* Non-ISOC EPs */
3861			if (hs_ep->halted) {
3862				if (!(epctrl & DXEPCTL_EPENA))
3863					epctrl |= DXEPCTL_EPENA;
3864				epctrl |= DXEPCTL_EPDIS;
3865				epctrl |= DXEPCTL_STALL;
3866				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
3867			}
3868		}
3869
3870		/* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
3871	}
3872
3873	if (gintsts & GINTSTS_GINNAKEFF) {
3874		dev_info(hsotg->dev, "GINNakEff triggered\n");
3875
3876		dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
3877
3878		dwc2_hsotg_dump(hsotg);
3879	}
3880
3881	if (gintsts & GINTSTS_INCOMPL_SOIN)
3882		dwc2_gadget_handle_incomplete_isoc_in(hsotg);
3883
3884	if (gintsts & GINTSTS_INCOMPL_SOOUT)
3885		dwc2_gadget_handle_incomplete_isoc_out(hsotg);
3886
3887	/*
3888	 * if we've had fifo events, we should try and go around the
3889	 * loop again to see if there's any point in returning yet.
3890	 */
3891
3892	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
3893		goto irq_retry;
3894
3895	/* Check WKUP_ALERT interrupt */
3896	if (hsotg->params.service_interval)
3897		dwc2_gadget_wkup_alert_handler(hsotg);
3898
3899	spin_unlock(&hsotg->lock);
3900
3901	return IRQ_HANDLED;
3902}
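/*
 * Editor's sketch of the bounded-retry idiom the handler above uses:
 * because GINTSTS_RXFLVL doubles as FIFO-not-empty, the masked status
 * is re-read up to eight times before the handler returns. A stripped
 * restatement (example_* name is hypothetical):
 */
#if 0
static void example_bounded_retry(struct dwc2_hsotg *hsotg)
{
	int retry_count = 8;
	u32 pending;

	do {
		pending = dwc2_readl(hsotg, GINTSTS) &
			  dwc2_readl(hsotg, GINTMSK);
		/* ... service the bits set in 'pending' here ... */
	} while ((pending & IRQ_RETRY_MASK) && --retry_count > 0);
}
#endif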
3903
3904static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
3905				   struct dwc2_hsotg_ep *hs_ep)
3906{
3907	u32 epctrl_reg;
3908	u32 epint_reg;
3909
3910	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
3911		DOEPCTL(hs_ep->index);
3912	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
3913		DOEPINT(hs_ep->index);
3914
3915	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
3916		hs_ep->name);
3917
3918	if (hs_ep->dir_in) {
3919		if (hsotg->dedicated_fifos || hs_ep->periodic) {
3920			dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
3921			/* Wait for Nak effect */
3922			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
3923						    DXEPINT_INEPNAKEFF, 100))
3924				dev_warn(hsotg->dev,
3925					 "%s: timeout DIEPINT.NAKEFF\n",
3926					 __func__);
3927		} else {
3928			dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
3929			/* Wait for Nak effect */
3930			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3931						    GINTSTS_GINNAKEFF, 100))
3932				dev_warn(hsotg->dev,
3933					 "%s: timeout GINTSTS.GINNAKEFF\n",
3934					 __func__);
3935		}
3936	} else {
3937		/* Mask GINTSTS_GOUTNAKEFF interrupt */
3938		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_GOUTNAKEFF);
3939
3940		if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
3941			dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
3942
3943		if (!using_dma(hsotg)) {
3944			/* Wait for GINTSTS_RXFLVL interrupt */
3945			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3946						    GINTSTS_RXFLVL, 100)) {
3947				dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
3948					 __func__);
3949			} else {
3950				/*
3951				 * Pop GLOBAL OUT NAK status packet from RxFIFO
3952				 * to assert GOUTNAKEFF interrupt
3953				 */
3954				dwc2_readl(hsotg, GRXSTSP);
3955			}
3956		}
3957
3958		/* Wait for global nak to take effect */
3959		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3960					    GINTSTS_GOUTNAKEFF, 100))
3961			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
3962				 __func__);
3963	}
3964
3965	/* Disable ep */
3966	dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
3967
3968	/* Wait for ep to be disabled */
3969	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
3970		dev_warn(hsotg->dev,
3971			 "%s: timeout DOEPCTL.EPDisable\n", __func__);
3972
3973	/* Clear EPDISBLD interrupt */
3974	dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
3975
3976	if (hs_ep->dir_in) {
3977		unsigned short fifo_index;
3978
3979		if (hsotg->dedicated_fifos || hs_ep->periodic)
3980			fifo_index = hs_ep->fifo_index;
3981		else
3982			fifo_index = 0;
3983
3984		/* Flush TX FIFO */
3985		dwc2_flush_tx_fifo(hsotg, fifo_index);
3986
3987		/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
3988		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
3989			dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
3990
3991	} else {
3992		/* Remove global NAKs */
3993		dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
3994	}
3995}
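/*
 * Editor's sketch: every handshake in dwc2_hsotg_ep_stop_xfr() follows
 * the same "set request bit, poll for the acknowledge bit" pattern
 * built on dwc2_hsotg_wait_bit_set(). For a dedicated-FIFO IN endpoint
 * it reduces to the following (timeout of 100 follows this file's
 * convention; example_* name is hypothetical):
 */
#if 0
static void example_in_nak_handshake(struct dwc2_hsotg *hsotg, int index)
{
	/* request NAK on the IN endpoint ... */
	dwc2_set_bit(hsotg, DIEPCTL(index), DXEPCTL_SNAK);

	/* ... then poll for the NAK-effective acknowledge */
	if (dwc2_hsotg_wait_bit_set(hsotg, DIEPINT(index),
				    DXEPINT_INEPNAKEFF, 100))
		dev_warn(hsotg->dev, "example: NAK handshake timed out\n");
}
#endif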
3996
3997/**
3998 * dwc2_hsotg_ep_enable - enable the given endpoint
3999 * @ep: The USB endpoint to configure
4000 * @desc: The USB endpoint descriptor to configure with.
4001 *
4002 * This is called from the USB gadget code's usb_ep_enable().
4003 */
4004static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
4005				const struct usb_endpoint_descriptor *desc)
4006{
4007	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4008	struct dwc2_hsotg *hsotg = hs_ep->parent;
4009	unsigned long flags;
4010	unsigned int index = hs_ep->index;
4011	u32 epctrl_reg;
4012	u32 epctrl;
4013	u32 mps;
4014	u32 mc;
4015	u32 mask;
4016	unsigned int dir_in;
4017	unsigned int i, val, size;
4018	int ret = 0;
4019	unsigned char ep_type;
4020	int desc_num;
4021
4022	dev_dbg(hsotg->dev,
4023		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
4024		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
4025		desc->wMaxPacketSize, desc->bInterval);
4026
4027	/* not to be called for EP0 */
4028	if (index == 0) {
4029		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
4030		return -EINVAL;
4031	}
4032
4033	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
4034	if (dir_in != hs_ep->dir_in) {
4035		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
4036		return -EINVAL;
4037	}
4038
4039	ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
4040	mps = usb_endpoint_maxp(desc);
4041	mc = usb_endpoint_maxp_mult(desc);
4042
4043	/* ISOC IN in DDMA mode supports bInterval up to 10 */
4044	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
4045	    dir_in && desc->bInterval > 10) {
4046		dev_err(hsotg->dev,
4047			"%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
4048		return -EINVAL;
4049	}
4050
4051	/* High bandwidth ISOC OUT in DDMA not supported */
4052	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
4053	    !dir_in && mc > 1) {
4054		dev_err(hsotg->dev,
4055			"%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
4056		return -EINVAL;
4057	}
4058
4059	/* note: we handle this here instead of in dwc2_hsotg_set_ep_maxpacket() */
4060
4061	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4062	epctrl = dwc2_readl(hsotg, epctrl_reg);
4063
4064	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
4065		__func__, epctrl, epctrl_reg);
4066
4067	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
4068		desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
4069	else
4070		desc_num = MAX_DMA_DESC_NUM_GENERIC;
4071
4072	/* Allocate DMA descriptor chain for non-ctrl endpoints */
4073	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
4074		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
4075			desc_num * sizeof(struct dwc2_dma_desc),
4076			&hs_ep->desc_list_dma, GFP_ATOMIC);
4077		if (!hs_ep->desc_list) {
4078			ret = -ENOMEM;
4079			goto error2;
4080		}
4081	}
4082
4083	spin_lock_irqsave(&hsotg->lock, flags);
4084
4085	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
4086	epctrl |= DXEPCTL_MPS(mps);
4087
4088	/*
4089	 * mark the endpoint as active, otherwise the core may ignore
4090	 * transactions entirely for this endpoint
4091	 */
4092	epctrl |= DXEPCTL_USBACTEP;
4093
4094	/* update the endpoint state */
4095	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
4096
4097	/* default, set to non-periodic */
4098	hs_ep->isochronous = 0;
4099	hs_ep->periodic = 0;
4100	hs_ep->halted = 0;
4101	hs_ep->wedged = 0;
4102	hs_ep->interval = desc->bInterval;
4103
4104	switch (ep_type) {
4105	case USB_ENDPOINT_XFER_ISOC:
4106		epctrl |= DXEPCTL_EPTYPE_ISO;
4107		epctrl |= DXEPCTL_SETEVENFR;
4108		hs_ep->isochronous = 1;
4109		hs_ep->interval = 1 << (desc->bInterval - 1);
4110		hs_ep->target_frame = TARGET_FRAME_INITIAL;
4111		hs_ep->next_desc = 0;
4112		hs_ep->compl_desc = 0;
4113		if (dir_in) {
4114			hs_ep->periodic = 1;
4115			mask = dwc2_readl(hsotg, DIEPMSK);
4116			mask |= DIEPMSK_NAKMSK;
4117			dwc2_writel(hsotg, mask, DIEPMSK);
4118		} else {
4119			epctrl |= DXEPCTL_SNAK;
4120			mask = dwc2_readl(hsotg, DOEPMSK);
4121			mask |= DOEPMSK_OUTTKNEPDISMSK;
4122			dwc2_writel(hsotg, mask, DOEPMSK);
4123		}
4124		break;
4125
4126	case USB_ENDPOINT_XFER_BULK:
4127		epctrl |= DXEPCTL_EPTYPE_BULK;
4128		break;
4129
4130	case USB_ENDPOINT_XFER_INT:
4131		if (dir_in)
4132			hs_ep->periodic = 1;
4133
4134		if (hsotg->gadget.speed == USB_SPEED_HIGH)
4135			hs_ep->interval = 1 << (desc->bInterval - 1);
4136
4137		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
4138		break;
4139
4140	case USB_ENDPOINT_XFER_CONTROL:
4141		epctrl |= DXEPCTL_EPTYPE_CONTROL;
4142		break;
4143	}
4144
4145	/*
4146	 * if the hardware has dedicated fifos, we must give each IN EP
4147	 * a unique tx-fifo even if it is non-periodic.
4148	 */
4149	if (dir_in && hsotg->dedicated_fifos) {
4150		unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
4151		u32 fifo_index = 0;
4152		u32 fifo_size = UINT_MAX;
4153
4154		size = hs_ep->ep.maxpacket * hs_ep->mc;
4155		for (i = 1; i <= fifo_count; ++i) {
4156			if (hsotg->fifo_map & (1 << i))
4157				continue;
4158			val = dwc2_readl(hsotg, DPTXFSIZN(i));
4159			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
4160			if (val < size)
4161				continue;
4162			/* Search for smallest acceptable fifo */
4163			if (val < fifo_size) {
4164				fifo_size = val;
4165				fifo_index = i;
4166			}
4167		}
4168		if (!fifo_index) {
4169			dev_err(hsotg->dev,
4170				"%s: No suitable fifo found\n", __func__);
4171			ret = -ENOMEM;
4172			goto error1;
4173		}
4174		epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
4175		hsotg->fifo_map |= 1 << fifo_index;
4176		epctrl |= DXEPCTL_TXFNUM(fifo_index);
4177		hs_ep->fifo_index = fifo_index;
4178		hs_ep->fifo_size = fifo_size;
4179	}
4180
4181	/* for non control endpoints, set PID to D0 */
4182	if (index && !hs_ep->isochronous)
4183		epctrl |= DXEPCTL_SETD0PID;
4184
4185	/* WA for full-speed ISOC IN in DDMA mode.
4186	 * By clearing the NAK status of the EP, the core will send a ZLP
4187	 * in response to the IN token and assert the NAK interrupt,
4188	 * relying on TxFIFO status only.
4189	 */
4190
4191	if (hsotg->gadget.speed == USB_SPEED_FULL &&
4192	    hs_ep->isochronous && dir_in) {
4193		/* The WA applies only to core versions from 2.72a
4194		 * to 4.00a (including both). Also for FS_IOT_1.00a
4195		 * and HS_IOT_1.00a.
4196		 */
4197		u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
4198
4199		if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
4200		     gsnpsid <= DWC2_CORE_REV_4_00a) ||
4201		     gsnpsid == DWC2_FS_IOT_REV_1_00a ||
4202		     gsnpsid == DWC2_HS_IOT_REV_1_00a)
4203			epctrl |= DXEPCTL_CNAK;
4204	}
4205
4206	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
4207		__func__, epctrl);
4208
4209	dwc2_writel(hsotg, epctrl, epctrl_reg);
4210	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
4211		__func__, dwc2_readl(hsotg, epctrl_reg));
4212
4213	/* enable the endpoint interrupt */
4214	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
4215
4216error1:
4217	spin_unlock_irqrestore(&hsotg->lock, flags);
4218
4219error2:
4220	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
4221		dmam_free_coherent(hsotg->dev, desc_num *
4222			sizeof(struct dwc2_dma_desc),
4223			hs_ep->desc_list, hs_ep->desc_list_dma);
4224		hs_ep->desc_list = NULL;
4225	}
4226
4227	return ret;
4228}
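/*
 * Editor's sketch of the best-fit TxFIFO search performed in
 * dwc2_hsotg_ep_enable(): among FIFOs not yet claimed in fifo_map,
 * pick the smallest one whose depth in bytes still fits
 * maxpacket * mc. Standalone restatement with hypothetical names;
 * depth_bytes[] is assumed indexed 1..fifo_count:
 */
#if 0
static u32 example_pick_tx_fifo(const u32 *depth_bytes, u32 fifo_count,
				u32 fifo_map, u32 needed)
{
	u32 i, best = 0, best_size = UINT_MAX;

	for (i = 1; i <= fifo_count; i++) {
		if (fifo_map & BIT(i))
			continue;		/* already claimed */
		if (depth_bytes[i] < needed)
			continue;		/* too small */
		if (depth_bytes[i] < best_size) {
			best_size = depth_bytes[i];
			best = i;
		}
	}
	return best;	/* 0 means no suitable FIFO was found */
}
#endif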
4229
4230/**
4231 * dwc2_hsotg_ep_disable - disable given endpoint
4232 * @ep: The endpoint to disable.
4233 */
4234static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
4235{
4236	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4237	struct dwc2_hsotg *hsotg = hs_ep->parent;
4238	int dir_in = hs_ep->dir_in;
4239	int index = hs_ep->index;
4240	u32 epctrl_reg;
4241	u32 ctrl;
4242
4243	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
4244
4245	if (ep == &hsotg->eps_out[0]->ep) {
4246		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
4247		return -EINVAL;
4248	}
4249
4250	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4251		dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
4252		return -EINVAL;
4253	}
4254
4255	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4256
4257	ctrl = dwc2_readl(hsotg, epctrl_reg);
4258
4259	if (ctrl & DXEPCTL_EPENA)
4260		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
4261
4262	ctrl &= ~DXEPCTL_EPENA;
4263	ctrl &= ~DXEPCTL_USBACTEP;
4264	ctrl |= DXEPCTL_SNAK;
4265
4266	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
4267	dwc2_writel(hsotg, ctrl, epctrl_reg);
4268
4269	/* disable endpoint interrupts */
4270	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
4271
4272	/* terminate all requests with shutdown */
4273	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
4274
4275	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
4276	hs_ep->fifo_index = 0;
4277	hs_ep->fifo_size = 0;
4278
4279	return 0;
4280}
4281
4282static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
4283{
4284	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4285	struct dwc2_hsotg *hsotg = hs_ep->parent;
4286	unsigned long flags;
4287	int ret;
4288
4289	spin_lock_irqsave(&hsotg->lock, flags);
4290	ret = dwc2_hsotg_ep_disable(ep);
4291	spin_unlock_irqrestore(&hsotg->lock, flags);
4292	return ret;
4293}
4294
4295/**
4296 * on_list - check whether a request is on the given endpoint's queue
4297 * @ep: The endpoint to check.
4298 * @test: The request to test if it is on the endpoint.
4299 */
4300static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
4301{
4302	struct dwc2_hsotg_req *req, *treq;
4303
4304	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
4305		if (req == test)
4306			return true;
4307	}
4308
4309	return false;
4310}
4311
4312/**
4313 * dwc2_hsotg_ep_dequeue - dequeue a request from the given endpoint
4314 * @ep: The endpoint to dequeue from.
4315 * @req: The request to be removed from the queue.
4316 */
4317static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
4318{
4319	struct dwc2_hsotg_req *hs_req = our_req(req);
4320	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4321	struct dwc2_hsotg *hs = hs_ep->parent;
4322	unsigned long flags;
4323
4324	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
4325
4326	spin_lock_irqsave(&hs->lock, flags);
4327
4328	if (!on_list(hs_ep, hs_req)) {
4329		spin_unlock_irqrestore(&hs->lock, flags);
4330		return -EINVAL;
4331	}
4332
4333	/* Dequeue already started request */
4334	if (req == &hs_ep->req->req)
4335		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
4336
4337	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
4338	spin_unlock_irqrestore(&hs->lock, flags);
4339
4340	return 0;
4341}
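/*
 * Editor's note: function drivers reach the handler above through the
 * standard gadget API; the cancelled request then completes with
 * -ECONNRESET. A minimal hedged usage sketch (example_* name is
 * hypothetical):
 */
#if 0
static void example_cancel_request(struct usb_ep *ep,
				   struct usb_request *req)
{
	usb_ep_dequeue(ep, req);	/* -> dwc2_hsotg_ep_dequeue() */
}
#endif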
4342
4343/**
4344 * dwc2_gadget_ep_set_wedge - set wedge on a given endpoint
4345 * @ep: The endpoint to be wedged.
4346 *
4347 */
4348static int dwc2_gadget_ep_set_wedge(struct usb_ep *ep)
4349{
4350	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4351	struct dwc2_hsotg *hs = hs_ep->parent;
4352
4353	unsigned long	flags;
4354	int		ret;
4355
4356	spin_lock_irqsave(&hs->lock, flags);
4357	hs_ep->wedged = 1;
4358	ret = dwc2_hsotg_ep_sethalt(ep, 1, false);
4359	spin_unlock_irqrestore(&hs->lock, flags);
4360
4361	return ret;
4362}
4363
4364/**
4365 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
4366 * @ep: The endpoint to set halt.
4367 * @value: Set or unset the halt.
4368 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
4369 *       the endpoint is busy processing requests.
4370 *
4371 * We need to stall the endpoint immediately if the request comes from the
4372 * SetFeature protocol command handler.
4373 */
4374static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
4375{
4376	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4377	struct dwc2_hsotg *hs = hs_ep->parent;
4378	int index = hs_ep->index;
4379	u32 epreg;
4380	u32 epctl;
4381	u32 xfertype;
4382
4383	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
4384
4385	if (index == 0) {
4386		if (value)
4387			dwc2_hsotg_stall_ep0(hs);
4388		else
4389			dev_warn(hs->dev,
4390				 "%s: can't clear halt on ep0\n", __func__);
4391		return 0;
4392	}
4393
4394	if (hs_ep->isochronous) {
4395		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
4396		return -EINVAL;
4397	}
4398
4399	if (!now && value && !list_empty(&hs_ep->queue)) {
4400		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
4401			ep->name);
4402		return -EAGAIN;
4403	}
4404
4405	if (hs_ep->dir_in) {
4406		epreg = DIEPCTL(index);
4407		epctl = dwc2_readl(hs, epreg);
4408
4409		if (value) {
4410			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
4411			if (epctl & DXEPCTL_EPENA)
4412				epctl |= DXEPCTL_EPDIS;
4413		} else {
4414			epctl &= ~DXEPCTL_STALL;
4415			hs_ep->wedged = 0;
4416			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4417			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4418			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4419				epctl |= DXEPCTL_SETD0PID;
4420		}
4421		dwc2_writel(hs, epctl, epreg);
4422	} else {
4423		epreg = DOEPCTL(index);
4424		epctl = dwc2_readl(hs, epreg);
4425
4426		if (value) {
4427			/* Unmask GOUTNAKEFF interrupt */
4428			dwc2_hsotg_en_gsint(hs, GINTSTS_GOUTNAKEFF);
4429
4430			if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
4431				dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
4432			// STALL bit will be set in GOUTNAKEFF interrupt handler
4433		} else {
4434			epctl &= ~DXEPCTL_STALL;
4435			hs_ep->wedged = 0;
4436			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4437			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4438			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4439				epctl |= DXEPCTL_SETD0PID;
4440			dwc2_writel(hs, epctl, epreg);
4441		}
4442	}
4443
4444	hs_ep->halted = value;
4445	return 0;
4446}
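/*
 * Editor's note: both halt flavours funnel into the handler above via
 * the ep_ops table below; a wedge is a halt that the gadget must not
 * clear when the host sends ClearFeature(ENDPOINT_HALT). Hedged usage
 * sketch (example_* name is hypothetical):
 */
#if 0
static void example_stall_ep(struct usb_ep *ep, bool permanent)
{
	if (permanent)
		usb_ep_set_wedge(ep);	/* -> dwc2_gadget_ep_set_wedge() */
	else
		usb_ep_set_halt(ep);	/* -> dwc2_hsotg_ep_sethalt_lock() */
}
#endif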
4447
4448/**
4449 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
4450 * @ep: The endpoint to set halt.
4451 * @value: Set or unset the halt.
4452 */
4453static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
4454{
4455	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4456	struct dwc2_hsotg *hs = hs_ep->parent;
4457	unsigned long flags;
4458	int ret;
4459
4460	spin_lock_irqsave(&hs->lock, flags);
4461	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
4462	spin_unlock_irqrestore(&hs->lock, flags);
4463
4464	return ret;
4465}
4466
4467static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
4468	.enable		= dwc2_hsotg_ep_enable,
4469	.disable	= dwc2_hsotg_ep_disable_lock,
4470	.alloc_request	= dwc2_hsotg_ep_alloc_request,
4471	.free_request	= dwc2_hsotg_ep_free_request,
4472	.queue		= dwc2_hsotg_ep_queue_lock,
4473	.dequeue	= dwc2_hsotg_ep_dequeue,
4474	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
4475	.set_wedge	= dwc2_gadget_ep_set_wedge,
4476	/* note: we don't believe there is any need for the fifo routines */
4477};
4478
4479/**
4480 * dwc2_hsotg_init - initialize the usb core
4481 * @hsotg: The driver state
4482 */
4483static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
4484{
4485	/* unmask subset of endpoint interrupts */
4486
4487	dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
4488		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
4489		    DIEPMSK);
4490
4491	dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
4492		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
4493		    DOEPMSK);
4494
4495	dwc2_writel(hsotg, 0, DAINTMSK);
4496
4497	/* Be in disconnected state until gadget is registered */
4498	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
4499
4500	/* setup fifos */
4501
4502	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4503		dwc2_readl(hsotg, GRXFSIZ),
4504		dwc2_readl(hsotg, GNPTXFSIZ));
4505
4506	dwc2_hsotg_init_fifo(hsotg);
4507
4508	if (using_dma(hsotg))
4509		dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
4510}
4511
4512/**
4513 * dwc2_hsotg_udc_start - prepare the udc for work
4514 * @gadget: The usb gadget state
4515 * @driver: The usb gadget driver
4516 *
4517 * Perform initialization to prepare the udc device and driver
4518 * for work.
4519 */
4520static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
4521				struct usb_gadget_driver *driver)
4522{
4523	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4524	unsigned long flags;
4525	int ret;
4526
4527	if (!hsotg) {
4528		pr_err("%s: called with no device\n", __func__);
4529		return -ENODEV;
4530	}
4531
4532	if (!driver) {
4533		dev_err(hsotg->dev, "%s: no driver\n", __func__);
4534		return -EINVAL;
4535	}
4536
4537	if (driver->max_speed < USB_SPEED_FULL)
4538		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
4539
4540	if (!driver->setup) {
4541		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
4542		return -EINVAL;
4543	}
4544
4545	WARN_ON(hsotg->driver);
4546
4547	hsotg->driver = driver;
4548	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
4549	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4550
4551	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
4552		ret = dwc2_lowlevel_hw_enable(hsotg);
4553		if (ret)
4554			goto err;
4555	}
4556
4557	if (!IS_ERR_OR_NULL(hsotg->uphy))
4558		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
4559
4560	spin_lock_irqsave(&hsotg->lock, flags);
4561	if (dwc2_hw_is_device(hsotg)) {
4562		dwc2_hsotg_init(hsotg);
4563		dwc2_hsotg_core_init_disconnected(hsotg, false);
4564	}
4565
4566	hsotg->enabled = 0;
4567	spin_unlock_irqrestore(&hsotg->lock, flags);
4568
4569	gadget->sg_supported = using_desc_dma(hsotg);
4570	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
4571
4572	return 0;
4573
4574err:
4575	hsotg->driver = NULL;
4576	return ret;
4577}
4578
4579/**
4580 * dwc2_hsotg_udc_stop - stop the udc
4581 * @gadget: The usb gadget state
4582 *
4583 * Stop the udc hw block and stay tuned for future transmissions
4584 */
4585static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
4586{
4587	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4588	unsigned long flags;
4589	int ep;
4590
4591	if (!hsotg)
4592		return -ENODEV;
4593
4594	/* all endpoints should be shutdown */
4595	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
4596		if (hsotg->eps_in[ep])
4597			dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
4598		if (hsotg->eps_out[ep])
4599			dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
4600	}
4601
4602	spin_lock_irqsave(&hsotg->lock, flags);
4603
4604	hsotg->driver = NULL;
4605	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4606	hsotg->enabled = 0;
4607
4608	spin_unlock_irqrestore(&hsotg->lock, flags);
4609
4610	if (!IS_ERR_OR_NULL(hsotg->uphy))
4611		otg_set_peripheral(hsotg->uphy->otg, NULL);
4612
4613	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4614		dwc2_lowlevel_hw_disable(hsotg);
4615
4616	return 0;
4617}
4618
4619/**
4620 * dwc2_hsotg_gadget_getframe - read the frame number
4621 * @gadget: The usb gadget state
4622 *
4623 * Read the (micro)frame number
4624 */
4625static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
4626{
4627	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
4628}
4629
4630/**
4631 * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
4632 * @gadget: The usb gadget state
4633 * @is_selfpowered: Whether the device is self-powered
4634 *
4635 * Set if the device is self or bus powered.
4636 */
4637static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
4638				      int is_selfpowered)
4639{
4640	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4641	unsigned long flags;
4642
4643	spin_lock_irqsave(&hsotg->lock, flags);
4644	gadget->is_selfpowered = !!is_selfpowered;
4645	spin_unlock_irqrestore(&hsotg->lock, flags);
4646
4647	return 0;
4648}
4649
4650/**
4651 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
4652 * @gadget: The usb gadget state
4653 * @is_on: Whether the pullup should be enabled
4654 *
4655 * Connect/Disconnect the USB PHY pullup
4656 */
4657static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
4658{
4659	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4660	unsigned long flags;
4661
4662	dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
4663		hsotg->op_state);
4664
4665	/* Don't modify pullup state while in host mode */
4666	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4667		hsotg->enabled = is_on;
4668		return 0;
4669	}
4670
4671	spin_lock_irqsave(&hsotg->lock, flags);
4672	if (is_on) {
4673		hsotg->enabled = 1;
4674		dwc2_hsotg_core_init_disconnected(hsotg, false);
4675		/* Enable ACG feature in device mode, if supported */
4676		dwc2_enable_acg(hsotg);
4677		dwc2_hsotg_core_connect(hsotg);
4678	} else {
4679		dwc2_hsotg_core_disconnect(hsotg);
4680		dwc2_hsotg_disconnect(hsotg);
4681		hsotg->enabled = 0;
4682	}
4683
4684	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4685	spin_unlock_irqrestore(&hsotg->lock, flags);
4686
4687	return 0;
4688}
4689
4690static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
4691{
4692	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4693	unsigned long flags;
4694
4695	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
4696	spin_lock_irqsave(&hsotg->lock, flags);
4697
4698	/*
4699	 * If controller is in partial power down state, it must exit from
4700	 * that state before being initialized / de-initialized
4701	 */
4702	if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
4703		/*
4704		 * No need to check the return value as
4705		 * registers are not being restored.
4706		 */
4707		dwc2_exit_partial_power_down(hsotg, 0, false);
4708
4709	if (is_active) {
4710		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
4711
4712		dwc2_hsotg_core_init_disconnected(hsotg, false);
4713		if (hsotg->enabled) {
4714			/* Enable ACG feature in device mode, if supported */
4715			dwc2_enable_acg(hsotg);
4716			dwc2_hsotg_core_connect(hsotg);
4717		}
4718	} else {
4719		dwc2_hsotg_core_disconnect(hsotg);
4720		dwc2_hsotg_disconnect(hsotg);
4721	}
4722
4723	spin_unlock_irqrestore(&hsotg->lock, flags);
4724	return 0;
4725}
4726
4727/**
4728 * dwc2_hsotg_vbus_draw - report bMaxPower field
4729 * @gadget: The usb gadget state
4730 * @mA: Amount of current
4731 *
4732 * Report to the phy how much power the device may consume.
4733 */
4734static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
4735{
4736	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4737
4738	if (IS_ERR_OR_NULL(hsotg->uphy))
4739		return -ENOTSUPP;
4740	return usb_phy_set_power(hsotg->uphy, mA);
4741}
4742
4743static void dwc2_gadget_set_speed(struct usb_gadget *g, enum usb_device_speed speed)
4744{
4745	struct dwc2_hsotg *hsotg = to_hsotg(g);
4746	unsigned long		flags;
4747
4748	spin_lock_irqsave(&hsotg->lock, flags);
4749	switch (speed) {
4750	case USB_SPEED_HIGH:
4751		hsotg->params.speed = DWC2_SPEED_PARAM_HIGH;
4752		break;
4753	case USB_SPEED_FULL:
4754		hsotg->params.speed = DWC2_SPEED_PARAM_FULL;
4755		break;
4756	case USB_SPEED_LOW:
4757		hsotg->params.speed = DWC2_SPEED_PARAM_LOW;
4758		break;
4759	default:
4760		dev_err(hsotg->dev, "invalid speed (%d)\n", speed);
4761	}
4762	spin_unlock_irqrestore(&hsotg->lock, flags);
4763}
4764
4765static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
4766	.get_frame	= dwc2_hsotg_gadget_getframe,
4767	.set_selfpowered	= dwc2_hsotg_set_selfpowered,
4768	.udc_start		= dwc2_hsotg_udc_start,
4769	.udc_stop		= dwc2_hsotg_udc_stop,
4770	.pullup                 = dwc2_hsotg_pullup,
4771	.udc_set_speed		= dwc2_gadget_set_speed,
4772	.vbus_session		= dwc2_hsotg_vbus_session,
4773	.vbus_draw		= dwc2_hsotg_vbus_draw,
4774};
4775
4776/**
4777 * dwc2_hsotg_initep - initialise a single endpoint
4778 * @hsotg: The device state.
4779 * @hs_ep: The endpoint to be initialised.
4780 * @epnum: The endpoint number
4781 * @dir_in: True if direction is in.
4782 *
4783 * Initialise the given endpoint (as part of the probe and device state
4784 * creation) to give to the gadget driver. Set up the endpoint name, any
4785 * direction information and other state that may be required.
4786 */
4787static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
4788			      struct dwc2_hsotg_ep *hs_ep,
4789			      int epnum,
4790			      bool dir_in)
4791{
4792	char *dir;
4793
4794	if (epnum == 0)
4795		dir = "";
4796	else if (dir_in)
4797		dir = "in";
4798	else
4799		dir = "out";
4800
4801	hs_ep->dir_in = dir_in;
4802	hs_ep->index = epnum;
4803
4804	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
4805
4806	INIT_LIST_HEAD(&hs_ep->queue);
4807	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
4808
4809	/* add to the list of endpoints known by the gadget driver */
4810	if (epnum)
4811		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
4812
4813	hs_ep->parent = hsotg;
4814	hs_ep->ep.name = hs_ep->name;
4815
4816	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
4817		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
4818	else
4819		usb_ep_set_maxpacket_limit(&hs_ep->ep,
4820					   epnum ? 1024 : EP0_MPS_LIMIT);
4821	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
4822
4823	if (epnum == 0) {
4824		hs_ep->ep.caps.type_control = true;
4825	} else {
4826		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
4827			hs_ep->ep.caps.type_iso = true;
4828			hs_ep->ep.caps.type_bulk = true;
4829		}
4830		hs_ep->ep.caps.type_int = true;
4831	}
4832
4833	if (dir_in)
4834		hs_ep->ep.caps.dir_in = true;
4835	else
4836		hs_ep->ep.caps.dir_out = true;
4837
4838	/*
4839	 * if we're using dma, we need to set the next-endpoint pointer
4840	 * to be something valid.
4841	 */
4842
4843	if (using_dma(hsotg)) {
4844		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
4845
4846		if (dir_in)
4847			dwc2_writel(hsotg, next, DIEPCTL(epnum));
4848		else
4849			dwc2_writel(hsotg, next, DOEPCTL(epnum));
4850	}
4851}
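/*
 * Editor's sketch: the naming above yields "ep0" for the shared
 * control endpoint and direction-suffixed names ("ep1in", "ep1out",
 * ...) otherwise. Restated as a standalone helper with a hypothetical
 * name:
 */
#if 0
static void example_ep_name(char *buf, size_t len, int epnum, bool dir_in)
{
	snprintf(buf, len, "ep%d%s",
		 epnum, epnum == 0 ? "" : (dir_in ? "in" : "out"));
}
#endif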
4852
4853/**
4854 * dwc2_hsotg_hw_cfg - read HW configuration registers
4855 * @hsotg: Programming view of the DWC_otg controller
4856 *
4857 * Read the USB core HW configuration registers
4858 */
4859static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
4860{
4861	u32 cfg;
4862	u32 ep_type;
4863	u32 i;
4864
4865	/* check hardware configuration */
4866
4867	hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
4868
4869	/* Add ep0 */
4870	hsotg->num_of_eps++;
4871
4872	hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
4873					sizeof(struct dwc2_hsotg_ep),
4874					GFP_KERNEL);
4875	if (!hsotg->eps_in[0])
4876		return -ENOMEM;
4877	/* Same dwc2_hsotg_ep is used in both directions for ep0 */
4878	hsotg->eps_out[0] = hsotg->eps_in[0];
4879
4880	cfg = hsotg->hw_params.dev_ep_dirs;
4881	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
4882		ep_type = cfg & 3;
4883		/* Direction in or both */
4884		if (!(ep_type & 2)) {
4885			hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
4886				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4887			if (!hsotg->eps_in[i])
4888				return -ENOMEM;
4889		}
4890		/* Direction out or both */
4891		if (!(ep_type & 1)) {
4892			hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
4893				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4894			if (!hsotg->eps_out[i])
4895				return -ENOMEM;
4896		}
4897	}
4898
4899	hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
4900	hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
4901
4902	dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
4903		 hsotg->num_of_eps,
4904		 hsotg->dedicated_fifos ? "dedicated" : "shared",
4905		 hsotg->fifo_mem);
4906	return 0;
4907}
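/*
 * Editor's sketch: dev_ep_dirs packs one 2-bit direction code per
 * endpoint; per the tests above, bit 1 clear means an IN endpoint
 * exists and bit 0 clear means an OUT endpoint exists (0 is thus
 * bidirectional). Hypothetical decode helper:
 */
#if 0
static void example_decode_ep_dirs(struct dwc2_hsotg *hsotg, u32 cfg)
{
	u32 i, code;

	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
		code = cfg & 3;
		dev_dbg(hsotg->dev, "ep%u:%s%s\n", i,
			!(code & 2) ? " in" : "",
			!(code & 1) ? " out" : "");
	}
}
#endif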
4908
4909/**
4910 * dwc2_hsotg_dump - dump state of the udc
4911 * @hsotg: Programming view of the DWC_otg controller
4912 *
4913 */
4914static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
4915{
4916#ifdef DEBUG
4917	struct device *dev = hsotg->dev;
4918	u32 val;
4919	int idx;
4920
4921	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
4922		 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
4923		 dwc2_readl(hsotg, DIEPMSK));
4924
4925	dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
4926		 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
4927
4928	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4929		 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
4930
4931	/* show periodic fifo settings */
4932
4933	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4934		val = dwc2_readl(hsotg, DPTXFSIZN(idx));
4935		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
4936			 val >> FIFOSIZE_DEPTH_SHIFT,
4937			 val & FIFOSIZE_STARTADDR_MASK);
4938	}
4939
4940	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
4941		dev_info(dev,
4942			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
4943			 dwc2_readl(hsotg, DIEPCTL(idx)),
4944			 dwc2_readl(hsotg, DIEPTSIZ(idx)),
4945			 dwc2_readl(hsotg, DIEPDMA(idx)));
4946
4947		val = dwc2_readl(hsotg, DOEPCTL(idx));
4948		dev_info(dev,
4949			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
4950			 idx, dwc2_readl(hsotg, DOEPCTL(idx)),
4951			 dwc2_readl(hsotg, DOEPTSIZ(idx)),
4952			 dwc2_readl(hsotg, DOEPDMA(idx)));
4953	}
4954
4955	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
4956		 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
4957#endif
4958}
4959
4960/**
4961 * dwc2_gadget_init - init function for gadget
4962 * @hsotg: Programming view of the DWC_otg controller
4963 *
4964 */
4965int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
4966{
4967	struct device *dev = hsotg->dev;
4968	int epnum;
4969	int ret;
4970
4971	/* Dump fifo information */
4972	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
4973		hsotg->params.g_np_tx_fifo_size);
4974	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
4975
4976	switch (hsotg->params.speed) {
4977	case DWC2_SPEED_PARAM_LOW:
4978		hsotg->gadget.max_speed = USB_SPEED_LOW;
4979		break;
4980	case DWC2_SPEED_PARAM_FULL:
4981		hsotg->gadget.max_speed = USB_SPEED_FULL;
4982		break;
4983	default:
4984		hsotg->gadget.max_speed = USB_SPEED_HIGH;
4985		break;
4986	}
4987
4988	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
4989	hsotg->gadget.name = dev_name(dev);
4990	hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
4991	hsotg->remote_wakeup_allowed = 0;
4992
4993	if (hsotg->params.lpm)
4994		hsotg->gadget.lpm_capable = true;
4995
4996	if (hsotg->dr_mode == USB_DR_MODE_OTG)
4997		hsotg->gadget.is_otg = 1;
4998	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4999		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5000
5001	ret = dwc2_hsotg_hw_cfg(hsotg);
5002	if (ret) {
5003		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
5004		return ret;
5005	}
5006
5007	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
5008			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
5009	if (!hsotg->ctrl_buff)
5010		return -ENOMEM;
5011
5012	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
5013			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
5014	if (!hsotg->ep0_buff)
5015		return -ENOMEM;
5016
5017	if (using_desc_dma(hsotg)) {
5018		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
5019		if (ret < 0)
5020			return ret;
5021	}
5022
5023	ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
5024			       IRQF_SHARED, dev_name(hsotg->dev), hsotg);
5025	if (ret < 0) {
5026		dev_err(dev, "cannot claim IRQ for gadget\n");
5027		return ret;
5028	}
5029
5030	/* hsotg->num_of_eps includes ep0, which dwc2_hsotg_hw_cfg() adds */
5031
5032	if (hsotg->num_of_eps == 0) {
5033		dev_err(dev, "wrong number of EPs (zero)\n");
5034		return -EINVAL;
5035	}
5036
5037	/* setup endpoint information */
5038
5039	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
5040	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
5041
5042	/* allocate EP0 request */
5043
5044	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
5045						     GFP_KERNEL);
5046	if (!hsotg->ctrl_req) {
5047		dev_err(dev, "failed to allocate ctrl req\n");
5048		return -ENOMEM;
5049	}
5050
5051	/* initialise the endpoints now the core has been initialised */
5052	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
5053		if (hsotg->eps_in[epnum])
5054			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
5055					  epnum, 1);
5056		if (hsotg->eps_out[epnum])
5057			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
5058					  epnum, 0);
5059	}
5060
5061	dwc2_hsotg_dump(hsotg);
5062
5063	return 0;
5064}
5065
5066/**
5067 * dwc2_hsotg_remove - remove function for hsotg driver
5068 * @hsotg: Programming view of the DWC_otg controller
5069 *
5070 */
5071int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
5072{
5073	usb_del_gadget_udc(&hsotg->gadget);
5074	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
5075
5076	return 0;
5077}
5078
5079int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
5080{
5081	unsigned long flags;
5082
5083	if (hsotg->lx_state != DWC2_L0)
5084		return 0;
5085
5086	if (hsotg->driver) {
5087		int ep;
5088
5089		dev_info(hsotg->dev, "suspending usb gadget %s\n",
5090			 hsotg->driver->driver.name);
5091
5092		spin_lock_irqsave(&hsotg->lock, flags);
5093		if (hsotg->enabled)
5094			dwc2_hsotg_core_disconnect(hsotg);
5095		dwc2_hsotg_disconnect(hsotg);
5096		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5097		spin_unlock_irqrestore(&hsotg->lock, flags);
5098
5099		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
5100			if (hsotg->eps_in[ep])
5101				dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
5102			if (hsotg->eps_out[ep])
5103				dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
5104		}
5105	}
5106
5107	return 0;
5108}
5109
5110int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
5111{
5112	unsigned long flags;
5113
5114	if (hsotg->lx_state == DWC2_L2)
5115		return 0;
5116
5117	if (hsotg->driver) {
5118		dev_info(hsotg->dev, "resuming usb gadget %s\n",
5119			 hsotg->driver->driver.name);
5120
5121		spin_lock_irqsave(&hsotg->lock, flags);
5122		dwc2_hsotg_core_init_disconnected(hsotg, false);
5123		if (hsotg->enabled) {
5124			/* Enable ACG feature in device mode, if supported */
5125			dwc2_enable_acg(hsotg);
5126			dwc2_hsotg_core_connect(hsotg);
5127		}
5128		spin_unlock_irqrestore(&hsotg->lock, flags);
5129	}
5130
5131	return 0;
5132}
5133
5134/**
5135 * dwc2_backup_device_registers() - Backup controller device registers.
5136 * When suspending the usb bus, registers need to be backed up
5137 * if controller power is disabled once suspended.
5138 *
5139 * @hsotg: Programming view of the DWC_otg controller
5140 */
5141int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
5142{
5143	struct dwc2_dregs_backup *dr;
5144	int i;
5145
5146	dev_dbg(hsotg->dev, "%s\n", __func__);
5147
5148	/* Backup dev regs */
5149	dr = &hsotg->dr_backup;
5150
5151	dr->dcfg = dwc2_readl(hsotg, DCFG);
5152	dr->dctl = dwc2_readl(hsotg, DCTL);
5153	dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
5154	dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
5155	dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
5156
5157	for (i = 0; i < hsotg->num_of_eps; i++) {
5158		/* Backup IN EPs */
5159		dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
5160
5161		/* Ensure DATA PID is correctly configured */
5162		if (dr->diepctl[i] & DXEPCTL_DPID)
5163			dr->diepctl[i] |= DXEPCTL_SETD1PID;
5164		else
5165			dr->diepctl[i] |= DXEPCTL_SETD0PID;
5166
5167		dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
5168		dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
5169
5170		/* Backup OUT EPs */
5171		dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
5172
5173		/* Ensure DATA PID is correctly configured */
5174		if (dr->doepctl[i] & DXEPCTL_DPID)
5175			dr->doepctl[i] |= DXEPCTL_SETD1PID;
5176		else
5177			dr->doepctl[i] |= DXEPCTL_SETD0PID;
5178
5179		dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
5180		dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
5181		dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
5182	}
5183	dr->valid = true;
5184	return 0;
5185}
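/*
 * Editor's note: DXEPCTL.DPID is read-only status, so the backup above
 * converts it into the corresponding write-only SETD0PID/SETD1PID
 * request; writing the register back later re-arms the same data
 * toggle. Sketch of that fixup in isolation (example_* name is
 * hypothetical):
 */
#if 0
static u32 example_fixup_data_pid(u32 epctl)
{
	return epctl | ((epctl & DXEPCTL_DPID) ?
			DXEPCTL_SETD1PID : DXEPCTL_SETD0PID);
}
#endif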
5186
5187/**
5188 * dwc2_restore_device_registers() - Restore controller device registers.
5189 * When resuming the usb bus, device registers need to be restored
5190 * if controller power was disabled.
5191 *
5192 * @hsotg: Programming view of the DWC_otg controller
5193 * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
5194 *
5195 * Return: 0 if successful, negative error code otherwise
5196 */
5197int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
5198{
5199	struct dwc2_dregs_backup *dr;
5200	int i;
5201
5202	dev_dbg(hsotg->dev, "%s\n", __func__);
5203
5204	/* Restore dev regs */
5205	dr = &hsotg->dr_backup;
5206	if (!dr->valid) {
5207		dev_err(hsotg->dev, "%s: no device registers to restore\n",
5208			__func__);
5209		return -EINVAL;
5210	}
5211	dr->valid = false;
5212
5213	if (!remote_wakeup)
5214		dwc2_writel(hsotg, dr->dctl, DCTL);
5215
5216	dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
5217	dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
5218	dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
5219
5220	for (i = 0; i < hsotg->num_of_eps; i++) {
5221		/* Restore IN EPs */
5222		dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
5223		/* WA for enabled IN EPs in DDMA mode. On entering
5224		 * hibernation a wrong value is read and saved from
5225		 * DIEPDMAx; as a result a BNA interrupt is asserted on
5226		 * hibernation exit when restoring from the saved area.
5227		 * Fix up the backup before it is written back below.
5228		 */
5229		if (using_desc_dma(hsotg) &&
5230		    (dr->diepctl[i] & DXEPCTL_EPENA))
5231			dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
5232		dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
5233		dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
5234		dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
5235		/* Restore OUT EPs */
5236		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
5237		/* WA for enabled OUT EPs in DDMA mode. On entering
5238		 * hibernation a wrong value is read and saved from
5239		 * DOEPDMAx; as a result a BNA interrupt is asserted on
5240		 * hibernation exit when restoring from the saved area.
5241		 */
5242		if (using_desc_dma(hsotg) &&
5243		    (dr->doepctl[i] & DXEPCTL_EPENA))
5244			dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
5245		dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
5246		dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
5247	}
5248
5249	return 0;
5250}
5251
5252/**
5253 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
5254 *
5255 * @hsotg: Programming view of DWC_otg controller
5256 *
5257 */
5258void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
5259{
5260	u32 val;
5261
5262	if (!hsotg->params.lpm)
5263		return;
5264
5265	val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
5266	val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
5267	val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
5268	val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
5269	val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
5270	val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
5271	val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
5272	dwc2_writel(hsotg, val, GLPMCFG);
5273	dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
5274
5275	/* Unmask WKUP_ALERT Interrupt */
5276	if (hsotg->params.service_interval)
5277		dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
5278}
5279
5280/**
5281 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
5282 *
5283 * @hsotg: Programming view of DWC_otg controller
5284 *
5285 */
5286void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
5287{
5288	u32 val = 0;
5289
5290	val |= GREFCLK_REF_CLK_MODE;
5291	val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
5292	val |= hsotg->params.sof_cnt_wkup_alert <<
5293	       GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
5294
5295	dwc2_writel(hsotg, val, GREFCLK);
5296	dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
5297}
5298
5299/**
5300 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
5301 *
5302 * @hsotg: Programming view of the DWC_otg controller
5303 *
5304 * Return non-zero if we failed to enter hibernation.
5305 */
5306int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
5307{
5308	u32 gpwrdn;
5309	int ret = 0;
5310
5311	/* Change to the L2 (suspend) state */
5312	hsotg->lx_state = DWC2_L2;
5313	dev_dbg(hsotg->dev, "Start of hibernation completed\n");
5314	ret = dwc2_backup_global_registers(hsotg);
5315	if (ret) {
5316		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5317			__func__);
5318		return ret;
5319	}
5320	ret = dwc2_backup_device_registers(hsotg);
5321	if (ret) {
5322		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
5323			__func__);
5324		return ret;
5325	}
5326
5327	gpwrdn = GPWRDN_PWRDNRSTN;
5328	gpwrdn |= GPWRDN_PMUACTV;
5329	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5330	udelay(10);
5331
5332	/* Set flag to indicate that we are in hibernation */
5333	hsotg->hibernated = 1;
5334
5335	/* Enable interrupts from wake up logic */
5336	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5337	gpwrdn |= GPWRDN_PMUINTSEL;
5338	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5339	udelay(10);
5340
5341	/* Unmask device mode interrupts in GPWRDN */
5342	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5343	gpwrdn |= GPWRDN_RST_DET_MSK;
5344	gpwrdn |= GPWRDN_LNSTSCHG_MSK;
5345	gpwrdn |= GPWRDN_STS_CHGINT_MSK;
5346	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5347	udelay(10);
5348
5349	/* Enable Power Down Clamp */
5350	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5351	gpwrdn |= GPWRDN_PWRDNCLMP;
5352	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5353	udelay(10);
5354
5355	/* Switch off VDD */
5356	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5357	gpwrdn |= GPWRDN_PWRDNSWTCH;
5358	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5359	udelay(10);
5360
5361	/* Save the GPWRDN register for later use in case a stschng interrupt occurs */
5362	hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
5363	dev_dbg(hsotg->dev, "Hibernation completed\n");
5364
5365	return ret;
5366}
5367
5368/**
5369 * dwc2_gadget_exit_hibernation()
5370 * This function is for exiting from Device mode hibernation by host-initiated
5371 * resume/reset and device-initiated remote wakeup.
5372 *
5373 * @hsotg: Programming view of the DWC_otg controller
5374 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
5375 * @reset: indicates whether resume is initiated by Reset.
5376 *
5377 * Return non-zero if failed to exit from hibernation.
5378 */
5379int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
5380				 int rem_wakeup, int reset)
5381{
5382	u32 pcgcctl;
5383	u32 gpwrdn;
5384	u32 dctl;
5385	int ret = 0;
5386	struct dwc2_gregs_backup *gr;
5387	struct dwc2_dregs_backup *dr;
5388
5389	gr = &hsotg->gr_backup;
5390	dr = &hsotg->dr_backup;
5391
5392	if (!hsotg->hibernated) {
5393		dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
5394		return 1;
5395	}
5396	dev_dbg(hsotg->dev,
5397		"%s: called with rem_wakeup = %d reset = %d\n",
5398		__func__, rem_wakeup, reset);
5399
5400	dwc2_hib_restore_common(hsotg, rem_wakeup, 0);
5401
5402	if (!reset) {
5403		/* Clear all pending interrupts */
5404		dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5405	}
5406
5407	/* De-assert Restore */
5408	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5409	gpwrdn &= ~GPWRDN_RESTORE;
5410	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5411	udelay(10);
5412
5413	if (!rem_wakeup) {
5414		pcgcctl = dwc2_readl(hsotg, PCGCTL);
5415		pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
5416		dwc2_writel(hsotg, pcgcctl, PCGCTL);
5417	}
5418
5419	/* Restore GUSBCFG, DCFG and DCTL */
5420	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
5421	dwc2_writel(hsotg, dr->dcfg, DCFG);
5422	dwc2_writel(hsotg, dr->dctl, DCTL);
5423
5424	/* On USB Reset, reset device address to zero */
5425	if (reset)
5426		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
5427
5428	/* De-assert Wakeup Logic */
5429	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5430	gpwrdn &= ~GPWRDN_PMUACTV;
5431	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5432
5433	if (rem_wakeup) {
5434		udelay(10);
5435		/* Start Remote Wakeup Signaling */
5436		dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
5437	} else {
5438		udelay(50);
5439		/* Set Device programming done bit */
5440		dctl = dwc2_readl(hsotg, DCTL);
5441		dctl |= DCTL_PWRONPRGDONE;
5442		dwc2_writel(hsotg, dctl, DCTL);
5443	}
5444	/* Wait for interrupts which must be cleared */
5445	mdelay(2);
5446	/* Clear all pending interrupts */
5447	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5448
5449	/* Restore global registers */
5450	ret = dwc2_restore_global_registers(hsotg);
5451	if (ret) {
5452		dev_err(hsotg->dev, "%s: failed to restore registers\n",
5453			__func__);
5454		return ret;
5455	}
5456
5457	/* Restore device registers */
5458	ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
5459	if (ret) {
5460		dev_err(hsotg->dev, "%s: failed to restore device registers\n",
5461			__func__);
5462		return ret;
5463	}
5464
5465	if (rem_wakeup) {
5466		mdelay(10);
5467		dctl = dwc2_readl(hsotg, DCTL);
5468		dctl &= ~DCTL_RMTWKUPSIG;
5469		dwc2_writel(hsotg, dctl, DCTL);
5470	}
5471
5472	hsotg->hibernated = 0;
5473	hsotg->lx_state = DWC2_L0;
5474	dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");
5475
5476	return ret;
5477}
5478
5479/**
5480 * dwc2_gadget_enter_partial_power_down() - Put controller in partial
5481 * power down.
5482 *
5483 * @hsotg: Programming view of the DWC_otg controller
5484 *
5485 * Return: non-zero if failed to enter device partial power down.
5486 *
5487 * This function is for entering device mode partial power down.
5488 */
5489int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
5490{
5491	u32 pcgcctl;
5492	int ret = 0;
5493
5494	dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
5495
5496	/* Backup all registers */
5497	ret = dwc2_backup_global_registers(hsotg);
5498	if (ret) {
5499		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5500			__func__);
5501		return ret;
5502	}
5503
5504	ret = dwc2_backup_device_registers(hsotg);
5505	if (ret) {
5506		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
5507			__func__);
5508		return ret;
5509	}
5510
5511	/*
5512	 * Clear any pending interrupts since dwc2 will not be able to
5513	 * clear them after entering partial_power_down.
5514	 */
5515	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5516
5517	/* Put the controller in low power state */
5518	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5519
5520	pcgcctl |= PCGCTL_PWRCLMP;
5521	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5522	udelay(5);
5523
5524	pcgcctl |= PCGCTL_RSTPDWNMODULE;
5525	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5526	udelay(5);
5527
5528	pcgcctl |= PCGCTL_STOPPCLK;
5529	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5530
5531	/* Set the in_ppd flag to 1 as the core enters suspend here. */
5532	hsotg->in_ppd = 1;
5533	hsotg->lx_state = DWC2_L2;
5534
5535	dev_dbg(hsotg->dev, "Entering device partial power down completed.\n");
5536
5537	return ret;
5538}
5539
5540/*
5541 * dwc2_gadget_exit_partial_power_down() - Exit controller from device partial
5542 * power down.
5543 *
5544 * @hsotg: Programming view of the DWC_otg controller
5545 * @restore: indicates whether the registers need to be restored or not.
5546 *
5547 * Return: non-zero if failed to exit device partial power down.
5548 *
5549 * This function is for exiting from device mode partial power down.
5550 */
5551int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
5552					bool restore)
5553{
5554	u32 pcgcctl;
5555	u32 dctl;
5556	struct dwc2_dregs_backup *dr;
5557	int ret = 0;
5558
5559	dr = &hsotg->dr_backup;
5560
5561	dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
5562
5563	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5564	pcgcctl &= ~PCGCTL_STOPPCLK;
5565	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5566
5567	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5568	pcgcctl &= ~PCGCTL_PWRCLMP;
5569	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5570
5571	pcgcctl = dwc2_readl(hsotg, PCGCTL);
5572	pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
5573	dwc2_writel(hsotg, pcgcctl, PCGCTL);
5574
5575	udelay(100);
5576	if (restore) {
5577		ret = dwc2_restore_global_registers(hsotg);
5578		if (ret) {
5579			dev_err(hsotg->dev, "%s: failed to restore registers\n",
5580				__func__);
5581			return ret;
5582		}
5583		/* Restore DCFG */
5584		dwc2_writel(hsotg, dr->dcfg, DCFG);
5585
5586		ret = dwc2_restore_device_registers(hsotg, 0);
5587		if (ret) {
5588			dev_err(hsotg->dev, "%s: failed to restore device registers\n",
5589				__func__);
5590			return ret;
5591		}
5592	}
5593
5594	/* Set the Power-On Programming done bit */
5595	dctl = dwc2_readl(hsotg, DCTL);
5596	dctl |= DCTL_PWRONPRGDONE;
5597	dwc2_writel(hsotg, dctl, DCTL);
5598
5599	/* Set the in_ppd flag to 0 as the core exits suspend here. */
5600	hsotg->in_ppd = 0;
5601	hsotg->lx_state = DWC2_L0;
5602
5603	dev_dbg(hsotg->dev, "Exiting device partial power down completed.\n");
5604	return ret;
5605}
5606
5607/**
5608 * dwc2_gadget_enter_clock_gating() - Put controller in clock gating.
5609 *
5610 * @hsotg: Programming view of the DWC_otg controller
5611 *
5614 * This function is for entering device mode clock gating.
5615 */
5616void dwc2_gadget_enter_clock_gating(struct dwc2_hsotg *hsotg)
5617{
5618	u32 pcgctl;
5619
5620	dev_dbg(hsotg->dev, "Entering device clock gating.\n");
5621
5622	/* Set the Phy Clock bit as suspend is received. */
5623	pcgctl = dwc2_readl(hsotg, PCGCTL);
5624	pcgctl |= PCGCTL_STOPPCLK;
5625	dwc2_writel(hsotg, pcgctl, PCGCTL);
5626	udelay(5);
5627
5628	/* Set the Gate hclk as suspend is received. */
5629	pcgctl = dwc2_readl(hsotg, PCGCTL);
5630	pcgctl |= PCGCTL_GATEHCLK;
5631	dwc2_writel(hsotg, pcgctl, PCGCTL);
5632	udelay(5);
5633
5634	hsotg->lx_state = DWC2_L2;
5635	hsotg->bus_suspended = true;
5636}
5637
5638/*
5639 * dwc2_gadget_exit_clock_gating() - Exit controller from device clock gating.
5640 *
5641 * @hsotg: Programming view of the DWC_otg controller
5642 * @rem_wakeup: indicates whether remote wake up is enabled.
5643 *
5644 * This function is for exiting from device mode clock gating.
5645 */
5646void dwc2_gadget_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup)
5647{
5648	u32 pcgctl;
5649	u32 dctl;
5650
5651	dev_dbg(hsotg->dev, "Exiting device clock gating.\n");
5652
5653	/* Clear the Gate hclk. */
5654	pcgctl = dwc2_readl(hsotg, PCGCTL);
5655	pcgctl &= ~PCGCTL_GATEHCLK;
5656	dwc2_writel(hsotg, pcgctl, PCGCTL);
5657	udelay(5);
5658
5659	/* Clear the Phy Clock bit. */
5660	pcgctl = dwc2_readl(hsotg, PCGCTL);
5661	pcgctl &= ~PCGCTL_STOPPCLK;
5662	dwc2_writel(hsotg, pcgctl, PCGCTL);
5663	udelay(5);
5664
5665	if (rem_wakeup) {
5666		/* Set Remote Wakeup Signaling */
5667		dctl = dwc2_readl(hsotg, DCTL);
5668		dctl |= DCTL_RMTWKUPSIG;
5669		dwc2_writel(hsotg, dctl, DCTL);
5670	}
5671
5672	/* Change to L0 state */
5673	call_gadget(hsotg, resume);
5674	hsotg->lx_state = DWC2_L0;
5675	hsotg->bus_suspended = false;
5676}
 108
 109/**
 110 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
 111 * @hs_ep: The endpoint
 112 *
 113 * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
 114 * If an overrun occurs it will wrap the value and set the frame_overrun flag.
 115 */
 116static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
 117{
 118	hs_ep->target_frame += hs_ep->interval;
 119	if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
 120		hs_ep->frame_overrun = true;
 121		hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
 122	} else {
 123		hs_ep->frame_overrun = false;
 124	}
 125}
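
/*
 * Worked example of the wrap logic above, assuming the 14-bit SOFFN counter
 * (DSTS_SOFFN_LIMIT == 0x3fff): target_frame = 0x3ffe with interval = 4
 * gives 0x4002, which exceeds the limit, so the value wraps to
 * 0x4002 & 0x3fff == 0x0002 and frame_overrun is set to true.
 */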
 126
 127/**
 128 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
 129 *                                    by one.
 130 * @hs_ep: The endpoint.
 131 *
 132 * This function is used in the service interval based scheduling flow to
 133 * calculate the descriptor frame number field value. In service interval mode
 134 * the frame number in the descriptor should point to the last (u)frame in the interval.
 135 *
 136 */
 137static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
 138{
 139	if (hs_ep->target_frame)
 140		hs_ep->target_frame -= 1;
 141	else
 142		hs_ep->target_frame = DSTS_SOFFN_LIMIT;
 143}
 144
 145/**
 146 * dwc2_hsotg_en_gsint - enable one or more of the general interrupt
 147 * @hsotg: The device state
 148 * @ints: A bitmask of the interrupts to enable
 149 */
 150static void dwc2_hsotg_en_gsint(struct dwc2_hsotg *hsotg, u32 ints)
 151{
 152	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
 153	u32 new_gsintmsk;
 154
 155	new_gsintmsk = gsintmsk | ints;
 156
 157	if (new_gsintmsk != gsintmsk) {
 158		dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
 159		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
 160	}
 161}
 162
 163/**
 164 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupt
 165 * @hsotg: The device state
 166 * @ints: A bitmask of the interrupts to disable
 167 */
 168static void dwc2_hsotg_disable_gsint(struct dwc2_hsotg *hsotg, u32 ints)
 169{
 170	u32 gsintmsk = dwc2_readl(hsotg, GINTMSK);
 171	u32 new_gsintmsk;
 172
 173	new_gsintmsk = gsintmsk & ~ints;
 174
 175	if (new_gsintmsk != gsintmsk)
 176		dwc2_writel(hsotg, new_gsintmsk, GINTMSK);
 177}
 178
 179/**
 180 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
 181 * @hsotg: The device state
 182 * @ep: The endpoint index
 183 * @dir_in: True if direction is in.
 184 * @en: The enable value, true to enable
 185 *
 186 * Set or clear the mask for an individual endpoint's interrupt
 187 * request.
 188 */
 189static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
 190				  unsigned int ep, unsigned int dir_in,
 191				 unsigned int en)
 192{
 193	unsigned long flags;
 194	u32 bit = 1 << ep;
 195	u32 daint;
 196
 197	if (!dir_in)
 198		bit <<= 16;
 199
 200	local_irq_save(flags);
 201	daint = dwc2_readl(hsotg, DAINTMSK);
 202	if (en)
 203		daint |= bit;
 204	else
 205		daint &= ~bit;
 206	dwc2_writel(hsotg, daint, DAINTMSK);
 207	local_irq_restore(flags);
 208}
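
/*
 * DAINTMSK keeps the IN endpoint bits in [15:0] and the OUT endpoint bits
 * in [31:16], which is why the bit is shifted by 16 for !dir_in above. For
 * example, enabling the interrupt for EP1 OUT sets (1 << 1) << 16, i.e.
 * bit 17 of DAINTMSK.
 */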
 209
 210/**
 211 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
 212 *
 213 * @hsotg: Programming view of the DWC_otg controller
 214 */
 215int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
 216{
 217	if (hsotg->hw_params.en_multiple_tx_fifo)
 218		/* In dedicated FIFO mode we need count of IN EPs */
 219		return hsotg->hw_params.num_dev_in_eps;
 220	else
 221		/* In shared FIFO mode we need count of Periodic IN EPs */
 222		return hsotg->hw_params.num_dev_perio_in_ep;
 223}
 224
 225/**
 226 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 227 * device mode TX FIFOs
 228 *
 229 * @hsotg: Programming view of the DWC_otg controller
 230 */
 231int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
 232{
 233	int addr;
 234	int tx_addr_max;
 235	u32 np_tx_fifo_size;
 236
 237	np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
 238				hsotg->params.g_np_tx_fifo_size);
 239
 240	/* Get Endpoint Info Control block size in DWORDs. */
 241	tx_addr_max = hsotg->hw_params.total_fifo_size;
 242
 243	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
 244	if (tx_addr_max <= addr)
 245		return 0;
 246
 247	return tx_addr_max - addr;
 248}
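
/*
 * Worked example with hypothetical sizes: for a core with
 * total_fifo_size = 1920 words, g_rx_fifo_size = 512 and an effective
 * non-periodic TX FIFO of 256 words, the depth left for the remaining
 * device mode TX FIFOs is 1920 - (512 + 256) = 1152 words.
 */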
 249
 250/**
 251 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
 252 *
 253 * @hsotg: Programming view of the DWC_otg controller
 254 *
 255 */
 256static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
 257{
 258	u32 gintsts2;
 259	u32 gintmsk2;
 260
 261	gintsts2 = dwc2_readl(hsotg, GINTSTS2);
 262	gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
 263	gintsts2 &= gintmsk2;
 264
 265	if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
 266		dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
 267		dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
 268		dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
 269	}
 270}
 271
 272/**
 273 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
 274 * TX FIFOs
 275 *
 276 * @hsotg: Programming view of the DWC_otg controller
 277 */
 278int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
 279{
 280	int tx_fifo_count;
 281	int tx_fifo_depth;
 282
 283	tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg);
 284
 285	tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
 286
 287	if (!tx_fifo_count)
 288		return tx_fifo_depth;
 289	else
 290		return tx_fifo_depth / tx_fifo_count;
 291}
 292
 293/**
 294 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
 295 * @hsotg: The device instance.
 296 */
 297static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
 298{
 299	unsigned int ep;
 300	unsigned int addr;
 301	int timeout;
 302
 303	u32 val;
 304	u32 *txfsz = hsotg->params.g_tx_fifo_size;
 305
 306	/* Reset fifo map if not correctly cleared during previous session */
 307	WARN_ON(hsotg->fifo_map);
 308	hsotg->fifo_map = 0;
 309
 310	/* set RX/NPTX FIFO sizes */
 311	dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
 312	dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
 313		    FIFOSIZE_STARTADDR_SHIFT) |
 314		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
 315		    GNPTXFSIZ);
 316
 317	/*
 318	 * arrange all the rest of the TX FIFOs, as some versions of this
 319	 * block have overlapping default addresses. This also ensures
 320	 * that if the settings have been changed, then they are set to
 321	 * known values.
 322	 */
 323
 324	/* start at the end of the GNPTXFSIZ, rounded up */
 325	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
 326
 327	/*
 328	 * Configure fifos sizes from provided configuration and assign
 329	 * them to endpoints dynamically according to maxpacket size value of
 330	 * given endpoint.
 331	 */
 332	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
 333		if (!txfsz[ep])
 334			continue;
 335		val = addr;
 336		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
 337		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
 338			  "insufficient fifo memory");
 339		addr += txfsz[ep];
 340
 341		dwc2_writel(hsotg, val, DPTXFSIZN(ep));
 342		val = dwc2_readl(hsotg, DPTXFSIZN(ep));
 343	}
 344
 345	dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
 346		    addr << GDFIFOCFG_EPINFOBASE_SHIFT,
 347		    GDFIFOCFG);
 348	/*
 349	 * according to p428 of the design guide, we need to ensure that
 350	 * all fifos are flushed before continuing
 351	 */
 352
 353	dwc2_writel(hsotg, GRSTCTL_TXFNUM(0x10) | GRSTCTL_TXFFLSH |
 354	       GRSTCTL_RXFFLSH, GRSTCTL);
 355
 356	/* wait until the fifos are both flushed */
 357	timeout = 100;
 358	while (1) {
 359		val = dwc2_readl(hsotg, GRSTCTL);
 360
 361		if ((val & (GRSTCTL_TXFFLSH | GRSTCTL_RXFFLSH)) == 0)
 362			break;
 363
 364		if (--timeout == 0) {
 365			dev_err(hsotg->dev,
 366				"%s: timeout flushing fifos (GRSTCTL=%08x)\n",
 367				__func__, val);
 368			break;
 369		}
 370
 371		udelay(1);
 372	}
 373
 374	dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
 375}
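
/*
 * The resulting FIFO layout, in words: the RX FIFO starts at address 0,
 * the non-periodic TX FIFO follows it, and each enabled DPTXFSIZN(ep)
 * FIFO is packed back to back after that, with GDFIFOCFG's EPINFOBASE
 * field left pointing at the first free word after the last TX FIFO.
 */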
 376
 377/**
 378 * dwc2_hsotg_ep_alloc_request - allocate a USB request structure
 379 * @ep: USB endpoint to allocate request for.
 380 * @flags: Allocation flags
 381 *
 382 * Allocate a new USB request structure appropriate for the specified endpoint
 383 */
 384static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep,
 385						       gfp_t flags)
 386{
 387	struct dwc2_hsotg_req *req;
 388
 389	req = kzalloc(sizeof(*req), flags);
 390	if (!req)
 391		return NULL;
 392
 393	INIT_LIST_HEAD(&req->queue);
 394
 395	return &req->req;
 396}
 397
 398/**
 399 * is_ep_periodic - return true if the endpoint is in periodic mode.
 400 * @hs_ep: The endpoint to query.
 401 *
 402 * Returns true if the endpoint is in periodic mode, meaning it is being
 403 * used for an Interrupt or ISO transfer.
 404 */
 405static inline int is_ep_periodic(struct dwc2_hsotg_ep *hs_ep)
 406{
 407	return hs_ep->periodic;
 408}
 409
 410/**
 411 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
 412 * @hsotg: The device state.
 413 * @hs_ep: The endpoint for the request
 414 * @hs_req: The request being processed.
 415 *
 416 * This is the reverse of dwc2_hsotg_map_dma(), called for the completion
 417 * of a request to ensure the buffer is ready for access by the caller.
 418 */
 419static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
 420				 struct dwc2_hsotg_ep *hs_ep,
 421				struct dwc2_hsotg_req *hs_req)
 422{
 423	struct usb_request *req = &hs_req->req;
 424
 425	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
 426}
 427
 428/*
 429 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
 430 * for Control endpoint
 431 * @hsotg: The device state.
 432 *
 433 * This function will allocate 4 descriptor chains for EP 0: 2 for
 434 * Setup stage, and one each for the IN and OUT data/status transactions.
 435 */
 436static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
 437{
 438	hsotg->setup_desc[0] =
 439		dmam_alloc_coherent(hsotg->dev,
 440				    sizeof(struct dwc2_dma_desc),
 441				    &hsotg->setup_desc_dma[0],
 442				    GFP_KERNEL);
 443	if (!hsotg->setup_desc[0])
 444		goto fail;
 445
 446	hsotg->setup_desc[1] =
 447		dmam_alloc_coherent(hsotg->dev,
 448				    sizeof(struct dwc2_dma_desc),
 449				    &hsotg->setup_desc_dma[1],
 450				    GFP_KERNEL);
 451	if (!hsotg->setup_desc[1])
 452		goto fail;
 453
 454	hsotg->ctrl_in_desc =
 455		dmam_alloc_coherent(hsotg->dev,
 456				    sizeof(struct dwc2_dma_desc),
 457				    &hsotg->ctrl_in_desc_dma,
 458				    GFP_KERNEL);
 459	if (!hsotg->ctrl_in_desc)
 460		goto fail;
 461
 462	hsotg->ctrl_out_desc =
 463		dmam_alloc_coherent(hsotg->dev,
 464				    sizeof(struct dwc2_dma_desc),
 465				    &hsotg->ctrl_out_desc_dma,
 466				    GFP_KERNEL);
 467	if (!hsotg->ctrl_out_desc)
 468		goto fail;
 469
 470	return 0;
 471
 472fail:
 473	return -ENOMEM;
 474}
 475
 476/**
 477 * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
 478 * @hsotg: The controller state.
 479 * @hs_ep: The endpoint we're going to write for.
 480 * @hs_req: The request to write data for.
 481 *
 482 * This is called when the TxFIFO has some space in it to hold a new
 483 * transmission and we have something to give it. The actual setup of
 484 * the data size is done elsewhere, so all we have to do is to actually
 485 * write the data.
 486 *
 487 * The return value is zero if there is more space (or nothing was done),
 488 * otherwise -ENOSPC is returned if the FIFO space was used up.
 489 *
 490 * This routine is only needed for PIO
 491 */
 492static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg,
 493				 struct dwc2_hsotg_ep *hs_ep,
 494				struct dwc2_hsotg_req *hs_req)
 495{
 496	bool periodic = is_ep_periodic(hs_ep);
 497	u32 gnptxsts = dwc2_readl(hsotg, GNPTXSTS);
 498	int buf_pos = hs_req->req.actual;
 499	int to_write = hs_ep->size_loaded;
 500	void *data;
 501	int can_write;
 502	int pkt_round;
 503	int max_transfer;
 504
 505	to_write -= (buf_pos - hs_ep->last_load);
 506
 507	/* if there's nothing to write, get out early */
 508	if (to_write == 0)
 509		return 0;
 510
 511	if (periodic && !hsotg->dedicated_fifos) {
 512		u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
 513		int size_left;
 514		int size_done;
 515
 516		/*
 517		 * work out how much data was loaded so we can calculate
 518		 * how much data is left in the fifo.
 519		 */
 520
 521		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
 522
 523		/*
 524		 * if shared fifo, we cannot write anything until the
 525		 * previous data has been completely sent.
 526		 */
 527		if (hs_ep->fifo_load != 0) {
 528			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
 529			return -ENOSPC;
 530		}
 531
 532		dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
 533			__func__, size_left,
 534			hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
 535
 536		/* how much of the data has moved */
 537		size_done = hs_ep->size_loaded - size_left;
 538
 539		/* how much data is left in the fifo */
 540		can_write = hs_ep->fifo_load - size_done;
 541		dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
 542			__func__, can_write);
 543
 544		can_write = hs_ep->fifo_size - can_write;
 545		dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
 546			__func__, can_write);
 547
 548		if (can_write <= 0) {
 549			dwc2_hsotg_en_gsint(hsotg, GINTSTS_PTXFEMP);
 550			return -ENOSPC;
 551		}
 552	} else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
 553		can_write = dwc2_readl(hsotg,
 554				       DTXFSTS(hs_ep->fifo_index));
 555
 556		can_write &= 0xffff;
 557		can_write *= 4;
 558	} else {
 559		if (GNPTXSTS_NP_TXQ_SPC_AVAIL_GET(gnptxsts) == 0) {
 560			dev_dbg(hsotg->dev,
 561				"%s: no queue slots available (0x%08x)\n",
 562				__func__, gnptxsts);
 563
 564			dwc2_hsotg_en_gsint(hsotg, GINTSTS_NPTXFEMP);
 565			return -ENOSPC;
 566		}
 567
 568		can_write = GNPTXSTS_NP_TXF_SPC_AVAIL_GET(gnptxsts);
 569		can_write *= 4;	/* fifo size is in 32bit quantities. */
 570	}
 571
 572	max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
 573
 574	dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
 575		__func__, gnptxsts, can_write, to_write, max_transfer);
 576
 577	/*
 578	 * limit to 512 bytes of data, it seems at least on the non-periodic
 579	 * FIFO, requests of >512 cause the endpoint to get stuck with a
 580	 * fragment of the end of the transfer in it.
 581	 */
 582	if (can_write > 512 && !periodic)
 583		can_write = 512;
 584
 585	/*
 586	 * limit the write to one max-packet size worth of data, but allow
 587	 * the transfer to return that it did not run out of fifo space
 588	 * doing it.
 589	 */
 590	if (to_write > max_transfer) {
 591		to_write = max_transfer;
 592
 593		/* it's needed only when we do not use dedicated fifos */
 594		if (!hsotg->dedicated_fifos)
 595			dwc2_hsotg_en_gsint(hsotg,
 596					    periodic ? GINTSTS_PTXFEMP :
 597					   GINTSTS_NPTXFEMP);
 598	}
 599
 600	/* see if we can write data */
 601
 602	if (to_write > can_write) {
 603		to_write = can_write;
 604		pkt_round = to_write % max_transfer;
 605
 606		/*
 607		 * Round the write down to an
 608		 * exact number of packets.
 609		 *
 610		 * Note, we do not currently check to see if we can ever
 611		 * write a full packet or not to the FIFO.
 612		 */
 613
 614		if (pkt_round)
 615			to_write -= pkt_round;
 616
 617		/*
 618		 * enable correct FIFO interrupt to alert us when there
 619		 * is more room left.
 620		 */
 621
 622		/* it's needed only when we do not use dedicated fifos */
 623		if (!hsotg->dedicated_fifos)
 624			dwc2_hsotg_en_gsint(hsotg,
 625					    periodic ? GINTSTS_PTXFEMP :
 626					   GINTSTS_NPTXFEMP);
 627	}
 628
 629	dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
 630		to_write, hs_req->req.length, can_write, buf_pos);
 631
 632	if (to_write <= 0)
 633		return -ENOSPC;
 634
 635	hs_req->req.actual = buf_pos + to_write;
 636	hs_ep->total_data += to_write;
 637
 638	if (periodic)
 639		hs_ep->fifo_load += to_write;
 640
 641	to_write = DIV_ROUND_UP(to_write, 4);
 642	data = hs_req->req.buf + buf_pos;
 643
 644	dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
 645
 646	return (to_write >= can_write) ? -ENOSPC : 0;
 647}
 648
 649/**
 650 * get_ep_limit - get the maximum data length for this endpoint
 651 * @hs_ep: The endpoint
 652 *
 653 * Return the maximum data that can be queued in one go on a given endpoint
 654 * so that transfers that are too long can be split.
 655 */
 656static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
 657{
 658	int index = hs_ep->index;
 659	unsigned int maxsize;
 660	unsigned int maxpkt;
 661
 662	if (index != 0) {
 663		maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1;
 664		maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1;
 665	} else {
 666		maxsize = 64 + 64;
 667		if (hs_ep->dir_in)
 668			maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1;
 669		else
 670			maxpkt = 2;
 671	}
 672
 673	/* we made the constant loading easier above by using +1 */
 674	maxpkt--;
 675	maxsize--;
 676
 677	/*
 678	 * constrain by packet count if maxpkts*pktsize is greater
 679	 * than the length register size.
 680	 */
 681
 682	if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
 683		maxsize = maxpkt * hs_ep->ep.maxpacket;
 684
 685	return maxsize;
 686}
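
/*
 * Worked example for a non-EP0 bulk endpoint with maxpacket = 512: the
 * register limits give maxsize = 0x7ffff (524287) and maxpkt = 0x3ff (1023);
 * 1023 * 512 = 523776 is below 524287, so the endpoint is constrained by
 * packet count and one request is capped at 523776 bytes, an exact number
 * of packets.
 */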
 687
 688/**
 689 * dwc2_hsotg_read_frameno - read current frame number
 690 * @hsotg: The device instance
 691 *
 692 * Return the current frame number
 693 */
 694static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
 695{
 696	u32 dsts;
 697
 698	dsts = dwc2_readl(hsotg, DSTS);
 699	dsts &= DSTS_SOFFN_MASK;
 700	dsts >>= DSTS_SOFFN_SHIFT;
 701
 702	return dsts;
 703}
 704
 705/**
 706 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
 707 * DMA descriptor chain prepared for specific endpoint
 708 * @hs_ep: The endpoint
 709 *
 710 * Return the maximum data that can be queued in one go on a given endpoint
 711 * depending on its descriptor chain capacity so that transfers that
 712 * are too long can be split.
 713 */
 714static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
 715{
 716	int is_isoc = hs_ep->isochronous;
 717	unsigned int maxsize;
 718
 719	if (is_isoc)
 720		maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
 721					   DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
 722					   MAX_DMA_DESC_NUM_HS_ISOC;
 723	else
 724		maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
 725
 726	return maxsize;
 727}
 728
 729/*
 730 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
 731 * @hs_ep: The endpoint
 732 * @mask: RX/TX bytes mask to be defined
 733 *
 734 * Returns maximum data payload for one descriptor after analyzing endpoint
 735 * characteristics.
 736 * DMA descriptor transfer bytes limit depends on EP type:
 737 * Control out - MPS,
 738 * Isochronous - descriptor rx/tx bytes bitfield limit,
 739 * Control In/Bulk/Interrupt - multiple of mps. This avoids concatenating
 740 * data from different descriptors within one packet.
 741 *
 742 * Selects corresponding mask for RX/TX bytes as well.
 743 */
 744static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
 745{
 746	u32 mps = hs_ep->ep.maxpacket;
 747	int dir_in = hs_ep->dir_in;
 748	u32 desc_size = 0;
 749
 750	if (!hs_ep->index && !dir_in) {
 751		desc_size = mps;
 752		*mask = DEV_DMA_NBYTES_MASK;
 753	} else if (hs_ep->isochronous) {
 754		if (dir_in) {
 755			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
 756			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
 757		} else {
 758			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
 759			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
 760		}
 761	} else {
 762		desc_size = DEV_DMA_NBYTES_LIMIT;
 763		*mask = DEV_DMA_NBYTES_MASK;
 764
 765		/* Round down desc_size to be mps multiple */
 766		desc_size -= desc_size % mps;
 767	}
 768
 769	return desc_size;
 770}
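
/*
 * Example values, assuming DEV_DMA_NBYTES_LIMIT == 0xffff: for a bulk
 * endpoint with mps = 512 the rounding above gives
 * 0xffff - (0xffff % 512) = 65024 bytes per descriptor, so every
 * descriptor except possibly the last carries a whole number of packets.
 */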
 771
 772static void dwc2_gadget_fill_nonisoc_xfer_ddma_one(struct dwc2_hsotg_ep *hs_ep,
 773						 struct dwc2_dma_desc **desc,
 774						 dma_addr_t dma_buff,
 775						 unsigned int len,
 776						 bool true_last)
 777{
 778	int dir_in = hs_ep->dir_in;
 779	u32 mps = hs_ep->ep.maxpacket;
 780	u32 maxsize = 0;
 781	u32 offset = 0;
 782	u32 mask = 0;
 783	int i;
 784
 785	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
 786
 787	hs_ep->desc_count = (len / maxsize) +
 788				((len % maxsize) ? 1 : 0);
 789	if (len == 0)
 790		hs_ep->desc_count = 1;
 791
 792	for (i = 0; i < hs_ep->desc_count; ++i) {
 793		(*desc)->status = 0;
 794		(*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
 795				 << DEV_DMA_BUFF_STS_SHIFT);
 796
 797		if (len > maxsize) {
 798			if (!hs_ep->index && !dir_in)
 799				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
 800
 801			(*desc)->status |=
 802				maxsize << DEV_DMA_NBYTES_SHIFT & mask;
 803			(*desc)->buf = dma_buff + offset;
 804
 805			len -= maxsize;
 806			offset += maxsize;
 807		} else {
 808			if (true_last)
 809				(*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
 810
 811			if (dir_in)
 812				(*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
 813					((hs_ep->send_zlp && true_last) ?
 814					DEV_DMA_SHORT : 0);
 815
 816			(*desc)->status |=
 817				len << DEV_DMA_NBYTES_SHIFT & mask;
 818			(*desc)->buf = dma_buff + offset;
 819		}
 820
 821		(*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
 822		(*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
 823				 << DEV_DMA_BUFF_STS_SHIFT);
 824		(*desc)++;
 825	}
 826}
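
/*
 * For instance, a 150000 byte bulk IN transfer with a 65024 byte
 * per-descriptor limit (bulk, mps = 512) splits as 2 * 65024 + 19952,
 * so hs_ep->desc_count = 3 and only the final descriptor carries the
 * DEV_DMA_L and DEV_DMA_IOC bits.
 */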
 827
 828/*
 829 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
 830 * @hs_ep: The endpoint
 831 * @ureq: Request to transfer
 832 * @offset: offset in bytes
 833 * @len: Length of the transfer
 834 *
 835 * This function will iterate over descriptor chain and fill its entries
 836 * with corresponding information based on transfer data.
 837 */
 838static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
 839						 dma_addr_t dma_buff,
 840						 unsigned int len)
 841{
 842	struct usb_request *ureq = NULL;
 843	struct dwc2_dma_desc *desc = hs_ep->desc_list;
 844	struct scatterlist *sg;
 845	int i;
 846	u8 desc_count = 0;
 847
 848	if (hs_ep->req)
 849		ureq = &hs_ep->req->req;
 850
 851	/* non-DMA sg buffer */
 852	if (!ureq || !ureq->num_sgs) {
 853		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
 854			dma_buff, len, true);
 855		return;
 856	}
 857
 858	/* DMA sg buffer */
 859	for_each_sg(ureq->sg, sg, ureq->num_sgs, i) {
 860		dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc,
 861			sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
 862			sg_is_last(sg));
 863		desc_count += hs_ep->desc_count;
 864	}
 865
 866	hs_ep->desc_count = desc_count;
 867}
 868
 869/*
 870 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
 871 * @hs_ep: The isochronous endpoint.
 872 * @dma_buff: usb requests dma buffer.
 873 * @len: usb request transfer length.
 874 *
 875 * Fills the next free descriptor with the data of the arrived usb request,
 876 * the frame info, sets the Last and IOC bits, and increments next_desc. If the
 877 * filled descriptor is not the first one, removes the L bit from the previous
 878 * descriptor status.
 879 */
 880static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
 881				      dma_addr_t dma_buff, unsigned int len)
 882{
 883	struct dwc2_dma_desc *desc;
 884	struct dwc2_hsotg *hsotg = hs_ep->parent;
 885	u32 index;
 886	u32 mask = 0;
 887	u8 pid = 0;
 888
 889	dwc2_gadget_get_desc_params(hs_ep, &mask);
 890
 891	index = hs_ep->next_desc;
 892	desc = &hs_ep->desc_list[index];
 893
 894	/* Check if descriptor chain full */
 895	if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
 896	    DEV_DMA_BUFF_STS_HREADY) {
 897		dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
 898		return 1;
 899	}
 900
 901	/* Clear L bit of previous desc if more than one entry in the chain */
 902	if (hs_ep->next_desc)
 903		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
 904
 905	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
 906		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
 907
 908	desc->status = 0;
 909	desc->status |= (DEV_DMA_BUFF_STS_HBUSY	<< DEV_DMA_BUFF_STS_SHIFT);
 910
 911	desc->buf = dma_buff;
 912	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
 913			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));
 914
 915	if (hs_ep->dir_in) {
 916		if (len)
 917			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
 918		else
 919			pid = 1;
 920		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
 921				 DEV_DMA_ISOC_PID_MASK) |
 922				((len % hs_ep->ep.maxpacket) ?
 923				 DEV_DMA_SHORT : 0) |
 924				((hs_ep->target_frame <<
 925				  DEV_DMA_ISOC_FRNUM_SHIFT) &
 926				 DEV_DMA_ISOC_FRNUM_MASK);
 927	}
 928
 929	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
 930	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
 931
 932	/* Increment frame number by interval for IN */
 933	if (hs_ep->dir_in)
 934		dwc2_gadget_incr_frame_num(hs_ep);
 935
 936	/* Update index of last configured entry in the chain */
 937	hs_ep->next_desc++;
 938	if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
 939		hs_ep->next_desc = 0;
 940
 941	return 0;
 942}
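
/*
 * Example of the PID computation above for high-bandwidth ISOC IN: with
 * mps = 1024 and len = 3000, pid = DIV_ROUND_UP(3000, 1024) = 3, i.e.
 * three packets in the (micro)frame, and DEV_DMA_SHORT is also set
 * because 3000 is not a multiple of 1024.
 */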
 943
 944/*
 945 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
 946 * @hs_ep: The isochronous endpoint.
 947 *
 948 * Prepare descriptor chain for isochronous endpoints. Afterwards
 949 * write DMA address to HW and enable the endpoint.
 950 */
 951static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
 952{
 953	struct dwc2_hsotg *hsotg = hs_ep->parent;
 954	struct dwc2_hsotg_req *hs_req, *treq;
 955	int index = hs_ep->index;
 956	int ret;
 957	int i;
 958	u32 dma_reg;
 959	u32 depctl;
 960	u32 ctrl;
 961	struct dwc2_dma_desc *desc;
 962
 963	if (list_empty(&hs_ep->queue)) {
 964		hs_ep->target_frame = TARGET_FRAME_INITIAL;
 965		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
 966		return;
 967	}
 968
 969	/* Initialize descriptor chain by Host Busy status */
 970	for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
 971		desc = &hs_ep->desc_list[i];
 972		desc->status = 0;
 973		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
 974				    << DEV_DMA_BUFF_STS_SHIFT);
 975	}
 976
 977	hs_ep->next_desc = 0;
 978	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
 979		dma_addr_t dma_addr = hs_req->req.dma;
 980
 981		if (hs_req->req.num_sgs) {
 982			WARN_ON(hs_req->req.num_sgs > 1);
 983			dma_addr = sg_dma_address(hs_req->req.sg);
 984		}
 985		ret = dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
 986						 hs_req->req.length);
 987		if (ret)
 988			break;
 989	}
 990
 991	hs_ep->compl_desc = 0;
 992	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
 993	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
 994
 995	/* write descriptor chain address to control register */
 996	dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
 997
 998	ctrl = dwc2_readl(hsotg, depctl);
 999	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
1000	dwc2_writel(hsotg, ctrl, depctl);
1001}
1002
1003/**
1004 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
1005 * @hsotg: The controller state.
1006 * @hs_ep: The endpoint to process a request for
1007 * @hs_req: The request to start.
1008 * @continuing: True if we are doing more for the current request.
1009 *
1010 * Start the given request running by setting the endpoint registers
1011 * appropriately, and writing any data to the FIFOs.
1012 */
1013static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
1014				 struct dwc2_hsotg_ep *hs_ep,
1015				struct dwc2_hsotg_req *hs_req,
1016				bool continuing)
1017{
1018	struct usb_request *ureq = &hs_req->req;
1019	int index = hs_ep->index;
1020	int dir_in = hs_ep->dir_in;
1021	u32 epctrl_reg;
1022	u32 epsize_reg;
1023	u32 epsize;
1024	u32 ctrl;
1025	unsigned int length;
1026	unsigned int packets;
1027	unsigned int maxreq;
1028	unsigned int dma_reg;
1029
1030	if (index != 0) {
1031		if (hs_ep->req && !continuing) {
1032			dev_err(hsotg->dev, "%s: active request\n", __func__);
1033			WARN_ON(1);
1034			return;
1035		} else if (hs_ep->req != hs_req && continuing) {
1036			dev_err(hsotg->dev,
1037				"%s: continue different req\n", __func__);
1038			WARN_ON(1);
1039			return;
1040		}
1041	}
1042
1043	dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
1044	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
1045	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
1046
1047	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
1048		__func__, dwc2_readl(hsotg, epctrl_reg), index,
1049		hs_ep->dir_in ? "in" : "out");
1050
1051	/* If endpoint is stalled, we will restart request later */
1052	ctrl = dwc2_readl(hsotg, epctrl_reg);
1053
1054	if (index && ctrl & DXEPCTL_STALL) {
1055		dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
1056		return;
1057	}
1058
1059	length = ureq->length - ureq->actual;
1060	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
1061		ureq->length, ureq->actual);
1062
1063	if (!using_desc_dma(hsotg))
1064		maxreq = get_ep_limit(hs_ep);
1065	else
1066		maxreq = dwc2_gadget_get_chain_limit(hs_ep);
1067
1068	if (length > maxreq) {
1069		int round = maxreq % hs_ep->ep.maxpacket;
1070
1071		dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
1072			__func__, length, maxreq, round);
1073
1074		/* round down to multiple of packets */
1075		if (round)
1076			maxreq -= round;
1077
1078		length = maxreq;
1079	}
1080
1081	if (length)
1082		packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
1083	else
1084		packets = 1;	/* send one packet if length is zero. */
1085
1086	if (dir_in && index != 0)
1087		if (hs_ep->isochronous)
1088			epsize = DXEPTSIZ_MC(packets);
1089		else
1090			epsize = DXEPTSIZ_MC(1);
1091	else
1092		epsize = 0;
1093
1094	/*
1095	 * zero length packet should be programmed on its own and should not
1096	 * be counted in DIEPTSIZ.PktCnt with other packets.
1097	 */
1098	if (dir_in && ureq->zero && !continuing) {
1099		/* Test if zlp is actually required. */
1100		if ((ureq->length >= hs_ep->ep.maxpacket) &&
1101		    !(ureq->length % hs_ep->ep.maxpacket))
1102			hs_ep->send_zlp = 1;
1103	}
1104
1105	epsize |= DXEPTSIZ_PKTCNT(packets);
1106	epsize |= DXEPTSIZ_XFERSIZE(length);
1107
1108	dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
1109		__func__, packets, length, ureq->length, epsize, epsize_reg);
1110
1111	/* store the request as the current one we're doing */
1112	hs_ep->req = hs_req;
1113
1114	if (using_desc_dma(hsotg)) {
1115		u32 offset = 0;
1116		u32 mps = hs_ep->ep.maxpacket;
1117
1118		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
1119		if (!dir_in) {
1120			if (!index)
1121				length = mps;
1122			else if (length % mps)
1123				length += (mps - (length % mps));
1124		}
1125
1126		/*
1127		 * If more data to send, adjust DMA for EP0 out data stage.
1128		 * ureq->dma stays unchanged, hence increment it by the already
1129		 * passed data count before starting the new transaction.
1130		 */
1131		if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
1132		    continuing)
1133			offset = ureq->actual;
1134
1135		/* Fill DDMA chain entries */
1136		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
1137						     length);
1138
1139		/* write descriptor chain address to control register */
1140		dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1141
1142		dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
1143			__func__, (u32)hs_ep->desc_list_dma, dma_reg);
1144	} else {
1145		/* write size / packets */
1146		dwc2_writel(hsotg, epsize, epsize_reg);
1147
1148		if (using_dma(hsotg) && !continuing && (length != 0)) {
1149			/*
1150			 * write DMA address to control register, buffer
1151			 * already synced by dwc2_hsotg_ep_queue().
1152			 */
1153
1154			dwc2_writel(hsotg, ureq->dma, dma_reg);
1155
1156			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
1157				__func__, &ureq->dma, dma_reg);
1158		}
1159	}
1160
1161	if (hs_ep->isochronous && hs_ep->interval == 1) {
1162		hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
1163		dwc2_gadget_incr_frame_num(hs_ep);
1164
1165		if (hs_ep->target_frame & 0x1)
1166			ctrl |= DXEPCTL_SETODDFR;
1167		else
1168			ctrl |= DXEPCTL_SETEVENFR;
1169	}
1170
1171	ctrl |= DXEPCTL_EPENA;	/* ensure ep enabled */
1172
1173	dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
1174
1175	/* For Setup request do not clear NAK */
1176	if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
1177		ctrl |= DXEPCTL_CNAK;	/* clear NAK set by core */
1178
1179	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
1180	dwc2_writel(hsotg, ctrl, epctrl_reg);
1181
1182	/*
1183	 * set these, it seems that DMA support increments past the end
1184	 * of the packet buffer so we need to calculate the length from
1185	 * this information.
1186	 */
1187	hs_ep->size_loaded = length;
1188	hs_ep->last_load = ureq->actual;
1189
1190	if (dir_in && !using_dma(hsotg)) {
1191		/* set these anyway, we may need them for non-periodic in */
1192		hs_ep->fifo_load = 0;
1193
1194		dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1195	}
1196
1197	/*
1198	 * Note, trying to clear the NAK here causes problems with transmit
1199	 * on the S3C6400 ending up with the TXFIFO becoming full.
1200	 */
1201
1202	/* check ep is enabled */
1203	if (!(dwc2_readl(hsotg, epctrl_reg) & DXEPCTL_EPENA))
1204		dev_dbg(hsotg->dev,
1205			"ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n",
1206			 index, dwc2_readl(hsotg, epctrl_reg));
1207
1208	dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
1209		__func__, dwc2_readl(hsotg, epctrl_reg));
1210
1211	/* enable ep interrupts */
1212	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
1213}
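
/*
 * Example of the size register packing done above: a 1024 byte request on
 * a bulk IN endpoint with maxpacket = 512 gives packets = 2, so the value
 * programmed into DIEPTSIZ (in the non-descriptor-DMA path) is
 * DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(2) | DXEPTSIZ_XFERSIZE(1024).
 */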
1214
1215/**
1216 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
1217 * @hsotg: The device state.
1218 * @hs_ep: The endpoint the request is on.
1219 * @req: The request being processed.
1220 *
1221 * We've been asked to queue a request, so ensure that the memory buffer
1222 * is correctly set up for DMA. If we've been passed an extant DMA address
1223 * then ensure the buffer has been synced to memory. If our buffer has no
1224 * DMA memory, then we map the memory and mark our request to allow us to
1225 * cleanup on completion.
1226 */
1227static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
1228			      struct dwc2_hsotg_ep *hs_ep,
1229			     struct usb_request *req)
1230{
1231	int ret;
1232
1233	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
1234	if (ret)
1235		goto dma_error;
1236
1237	return 0;
1238
1239dma_error:
1240	dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
1241		__func__, req->buf, req->length);
1242
1243	return -EIO;
1244}
1245
1246static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg,
1247						 struct dwc2_hsotg_ep *hs_ep,
1248						 struct dwc2_hsotg_req *hs_req)
1249{
1250	void *req_buf = hs_req->req.buf;
1251
1252	/* If dma is not being used or buffer is aligned */
1253	if (!using_dma(hsotg) || !((long)req_buf & 3))
1254		return 0;
1255
1256	WARN_ON(hs_req->saved_req_buf);
1257
1258	dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
1259		hs_ep->ep.name, req_buf, hs_req->req.length);
1260
1261	hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1262	if (!hs_req->req.buf) {
1263		hs_req->req.buf = req_buf;
1264		dev_err(hsotg->dev,
1265			"%s: unable to allocate memory for bounce buffer\n",
1266			__func__);
1267		return -ENOMEM;
1268	}
1269
1270	/* Save actual buffer */
1271	hs_req->saved_req_buf = req_buf;
1272
1273	if (hs_ep->dir_in)
1274		memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1275	return 0;
1276}
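
/*
 * The "(long)req_buf & 3" test above catches buffers that are not 32-bit
 * aligned: e.g. a buffer at offset 0x1002 gives 0x1002 & 3 == 2, so a
 * bounce buffer is allocated and, for IN transfers, pre-filled from the
 * original buffer before DMA mapping.
 */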
1277
1278static void
1279dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
1280					 struct dwc2_hsotg_ep *hs_ep,
1281					 struct dwc2_hsotg_req *hs_req)
1282{
1283	/* If dma is not being used or buffer was aligned */
1284	if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1285		return;
1286
1287	dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
1288		hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1289
1290	/* Copy data from bounce buffer on successful out transfer */
1291	if (!hs_ep->dir_in && !hs_req->req.status)
1292		memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1293		       hs_req->req.actual);
1294
1295	/* Free bounce buffer */
1296	kfree(hs_req->req.buf);
1297
1298	hs_req->req.buf = hs_req->saved_req_buf;
1299	hs_req->saved_req_buf = NULL;
1300}
1301
1302/**
1303 * dwc2_gadget_target_frame_elapsed - Checks target frame
1304 * @hs_ep: The driver endpoint to check
1305 *
1306 * Returns true if the targeted frame has elapsed. If it has, the
1307 * corresponding transfer must be dropped.
1308 */
1309static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
1310{
1311	struct dwc2_hsotg *hsotg = hs_ep->parent;
1312	u32 target_frame = hs_ep->target_frame;
1313	u32 current_frame = hsotg->frame_number;
1314	bool frame_overrun = hs_ep->frame_overrun;
1315
1316	if (!frame_overrun && current_frame >= target_frame)
1317		return true;
1318
1319	if (frame_overrun && current_frame >= target_frame &&
1320	    ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
1321		return true;
1322
1323	return false;
1324}
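
/*
 * Illustrative case for the overrun branch above: with
 * DSTS_SOFFN_LIMIT == 0x3fff and a wrapped target_frame of 2, a current
 * frame of 5 gives a distance of 3 < 0x3fff / 2, so the frame counts as
 * elapsed; a current frame of 0x3ff0 (the counter itself not yet wrapped)
 * gives a distance far above the half-range and is treated as not elapsed.
 */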
1325
1326/*
1327 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
1328 * @hsotg: The driver state
1329 * @hs_ep: the ep descriptor chain is for
1330 *
1331 * Called to update EP0 structure's pointers depending on the stage of
1332 * the control transfer.
1333 */
1334static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
1335					  struct dwc2_hsotg_ep *hs_ep)
1336{
1337	switch (hsotg->ep0_state) {
1338	case DWC2_EP0_SETUP:
1339	case DWC2_EP0_STATUS_OUT:
1340		hs_ep->desc_list = hsotg->setup_desc[0];
1341		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
1342		break;
1343	case DWC2_EP0_DATA_IN:
1344	case DWC2_EP0_STATUS_IN:
1345		hs_ep->desc_list = hsotg->ctrl_in_desc;
1346		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
1347		break;
1348	case DWC2_EP0_DATA_OUT:
1349		hs_ep->desc_list = hsotg->ctrl_out_desc;
1350		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
1351		break;
1352	default:
1353		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
1354			hsotg->ep0_state);
1355		return -EINVAL;
1356	}
1357
1358	return 0;
1359}
1360
1361static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
1362			       gfp_t gfp_flags)
1363{
1364	struct dwc2_hsotg_req *hs_req = our_req(req);
1365	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1366	struct dwc2_hsotg *hs = hs_ep->parent;
1367	bool first;
1368	int ret;
1369	u32 maxsize = 0;
1370	u32 mask = 0;
1371
1372
1373	dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
1374		ep->name, req, req->length, req->buf, req->no_interrupt,
1375		req->zero, req->short_not_ok);
1376
1377	/* Prevent new request submission when controller is suspended */
1378	if (hs->lx_state != DWC2_L0) {
1379		dev_dbg(hs->dev, "%s: submit request only in active state\n",
1380			__func__);
1381		return -EAGAIN;
1382	}
1383
1384	/* initialise status of the request */
1385	INIT_LIST_HEAD(&hs_req->queue);
1386	req->actual = 0;
1387	req->status = -EINPROGRESS;
1388
1389	/* Don't queue ISOC request if length greater than mps*mc */
1390	if (hs_ep->isochronous &&
1391	    req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
1392		dev_err(hs->dev, "req length > maxpacket*mc\n");
1393		return -EINVAL;
1394	}
1395
1396	/* In DDMA mode, don't queue an ISOC request if its length is
1397	 * greater than the descriptor limits.
1398	 */
1399	if (using_desc_dma(hs) && hs_ep->isochronous) {
1400		maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
1401		if (hs_ep->dir_in && req->length > maxsize) {
1402			dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
1403				req->length, maxsize);
1404			return -EINVAL;
1405		}
1406
1407		if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
1408			dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
1409				req->length, hs_ep->ep.maxpacket);
1410			return -EINVAL;
1411		}
1412	}
1413
1414	ret = dwc2_hsotg_handle_unaligned_buf_start(hs, hs_ep, hs_req);
1415	if (ret)
1416		return ret;
1417
1418	/* if we're using DMA, sync the buffers as necessary */
1419	if (using_dma(hs)) {
1420		ret = dwc2_hsotg_map_dma(hs, hs_ep, req);
1421		if (ret)
1422			return ret;
1423	}
1424	/* If using descriptor DMA configure EP0 descriptor chain pointers */
1425	if (using_desc_dma(hs) && !hs_ep->index) {
1426		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
1427		if (ret)
1428			return ret;
1429	}
1430
1431	first = list_empty(&hs_ep->queue);
1432	list_add_tail(&hs_req->queue, &hs_ep->queue);
1433
1434	/*
1435	 * Handle DDMA isochronous transfers separately - just add new entry
1436	 * to the descriptor chain.
1437	 * The transfer will be started once SW gets either a NAK or an
1438	 * OutTknEpDis interrupt.
1439	 */
1440	if (using_desc_dma(hs) && hs_ep->isochronous) {
1441		if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
1442			dma_addr_t dma_addr = hs_req->req.dma;
1443
1444			if (hs_req->req.num_sgs) {
1445				WARN_ON(hs_req->req.num_sgs > 1);
1446				dma_addr = sg_dma_address(hs_req->req.sg);
1447			}
1448			dwc2_gadget_fill_isoc_desc(hs_ep, dma_addr,
1449						   hs_req->req.length);
1450		}
1451		return 0;
1452	}
1453
1454	/* Change EP direction if status phase request is after data out */
1455	if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
1456	    hs->ep0_state == DWC2_EP0_DATA_OUT)
1457		hs_ep->dir_in = 1;
1458
1459	if (first) {
1460		if (!hs_ep->isochronous) {
1461			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1462			return 0;
1463		}
1464
1465		/* Update current frame number value. */
1466		hs->frame_number = dwc2_hsotg_read_frameno(hs);
1467		while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
1468			dwc2_gadget_incr_frame_num(hs_ep);
1469			/* Update current frame number value once more as it
1470			 * changes here.
1471			 */
1472			hs->frame_number = dwc2_hsotg_read_frameno(hs);
1473		}
1474
1475		if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
1476			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
1477	}
1478	return 0;
1479}
1480
1481static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
1482				    gfp_t gfp_flags)
1483{
1484	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1485	struct dwc2_hsotg *hs = hs_ep->parent;
1486	unsigned long flags = 0;
1487	int ret = 0;
1488
1489	spin_lock_irqsave(&hs->lock, flags);
1490	ret = dwc2_hsotg_ep_queue(ep, req, gfp_flags);
1491	spin_unlock_irqrestore(&hs->lock, flags);
1492
1493	return ret;
1494}
1495
1496static void dwc2_hsotg_ep_free_request(struct usb_ep *ep,
1497				       struct usb_request *req)
1498{
1499	struct dwc2_hsotg_req *hs_req = our_req(req);
1500
1501	kfree(hs_req);
1502}
1503
1504/**
1505 * dwc2_hsotg_complete_oursetup - setup completion callback
1506 * @ep: The endpoint the request was on.
1507 * @req: The request completed.
1508 *
1509 * Called on completion of any requests the driver itself
1510 * submitted that need cleaning up.
1511 */
1512static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
1513					 struct usb_request *req)
1514{
1515	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1516	struct dwc2_hsotg *hsotg = hs_ep->parent;
1517
1518	dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
1519
1520	dwc2_hsotg_ep_free_request(ep, req);
1521}
1522
1523/**
1524 * ep_from_windex - convert control wIndex value to endpoint
1525 * @hsotg: The driver state.
1526 * @windex: The control request wIndex field (in host order).
1527 *
1528 * Convert the given wIndex into a pointer to a driver endpoint
1529 * structure, or return NULL if it is not a valid endpoint.
1530 */
1531static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
1532					    u32 windex)
1533{
1534	struct dwc2_hsotg_ep *ep;
1535	int dir = (windex & USB_DIR_IN) ? 1 : 0;
1536	int idx = windex & 0x7F;
1537
1538	if (windex >= 0x100)
1539		return NULL;
1540
1541	if (idx > hsotg->num_of_eps)
1542		return NULL;
1543
1544	ep = index_to_ep(hsotg, idx, dir);
1545
1546	if (idx && ep->dir_in != dir)
1547		return NULL;
1548
1549	return ep;
1550}
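
/*
 * Example: a wIndex of 0x81 selects idx = 1 with USB_DIR_IN set, i.e.
 * hsotg->eps_in[1], while 0x01 selects hsotg->eps_out[1]; values of
 * 0x100 and above are rejected as malformed.
 */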
1551
1552/**
1553 * dwc2_hsotg_set_test_mode - Enable usb Test Modes
1554 * @hsotg: The driver state.
1555 * @testmode: requested usb test mode
1556 * Enable usb Test Mode requested by the Host.
1557 */
1558int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode)
1559{
1560	int dctl = dwc2_readl(hsotg, DCTL);
1561
1562	dctl &= ~DCTL_TSTCTL_MASK;
1563	switch (testmode) {
1564	case USB_TEST_J:
1565	case USB_TEST_K:
1566	case USB_TEST_SE0_NAK:
1567	case USB_TEST_PACKET:
1568	case USB_TEST_FORCE_ENABLE:
1569		dctl |= testmode << DCTL_TSTCTL_SHIFT;
1570		break;
1571	default:
1572		return -EINVAL;
1573	}
1574	dwc2_writel(hsotg, dctl, DCTL);
1575	return 0;
1576}
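
/*
 * Example: for USB_TEST_J the write above places the test mode selector
 * in the DCTL TstCtl field, i.e.
 * dctl = (dctl & ~DCTL_TSTCTL_MASK) | (USB_TEST_J << DCTL_TSTCTL_SHIFT).
 */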
1577
1578/**
1579 * dwc2_hsotg_send_reply - send reply to control request
1580 * @hsotg: The device state
1581 * @ep: Endpoint 0
1582 * @buff: Buffer for request
1583 * @length: Length of reply.
1584 *
1585 * Create a request and queue it on the given endpoint. This is useful as
1586 * an internal method of sending replies to certain control requests, etc.
1587 */
1588static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg,
1589				 struct dwc2_hsotg_ep *ep,
1590				void *buff,
1591				int length)
1592{
1593	struct usb_request *req;
1594	int ret;
1595
1596	dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
1597
1598	req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
1599	hsotg->ep0_reply = req;
1600	if (!req) {
1601		dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
1602		return -ENOMEM;
1603	}
1604
1605	req->buf = hsotg->ep0_buff;
1606	req->length = length;
1607	/*
1608	 * zero flag is for sending zlp in DATA IN stage. It has no impact on
1609	 * STATUS stage.
1610	 */
1611	req->zero = 0;
1612	req->complete = dwc2_hsotg_complete_oursetup;
1613
1614	if (length)
1615		memcpy(req->buf, buff, length);
1616
1617	ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
1618	if (ret) {
1619		dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
1620		return ret;
1621	}
1622
1623	return 0;
1624}
1625
1626/**
1627 * dwc2_hsotg_process_req_status - process request GET_STATUS
1628 * @hsotg: The device state
1629 * @ctrl: USB control request
1630 */
1631static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
1632					 struct usb_ctrlrequest *ctrl)
1633{
1634	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1635	struct dwc2_hsotg_ep *ep;
1636	__le16 reply;
1637	u16 status;
1638	int ret;
1639
1640	dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
1641
1642	if (!ep0->dir_in) {
1643		dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
1644		return -EINVAL;
1645	}
1646
1647	switch (ctrl->bRequestType & USB_RECIP_MASK) {
1648	case USB_RECIP_DEVICE:
1649		status = hsotg->gadget.is_selfpowered <<
1650			 USB_DEVICE_SELF_POWERED;
1651		status |= hsotg->remote_wakeup_allowed <<
1652			  USB_DEVICE_REMOTE_WAKEUP;
1653		reply = cpu_to_le16(status);
1654		break;
1655
1656	case USB_RECIP_INTERFACE:
1657		/* currently, the data result should be zero */
1658		reply = cpu_to_le16(0);
1659		break;
1660
1661	case USB_RECIP_ENDPOINT:
1662		ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1663		if (!ep)
1664			return -ENOENT;
1665
1666		reply = cpu_to_le16(ep->halted ? 1 : 0);
1667		break;
1668
1669	default:
1670		return 0;
1671	}
1672
1673	if (le16_to_cpu(ctrl->wLength) != 2)
1674		return -EINVAL;
1675
1676	ret = dwc2_hsotg_send_reply(hsotg, ep0, &reply, 2);
1677	if (ret) {
1678		dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1679		return ret;
1680	}
1681
1682	return 1;
1683}
1684
1685static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
1686
1687/**
1688 * get_ep_head - return the first request on the endpoint
1689 * @hs_ep: The controller endpoint to get
1690 *
1691 * Get the first request on the endpoint.
1692 */
1693static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
1694{
1695	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
1696					queue);
1697}
1698
1699/**
1700 * dwc2_gadget_start_next_request - Starts next request from ep queue
1701 * @hs_ep: Endpoint structure
1702 *
1703 * If the queue is empty and the EP is ISOC-OUT, unmask OUTTKNEPDIS, which is
1704 * masked in its handler; we need to unmask it here to allow resynchronization.
1706 */
1707static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
1708{
1709	u32 mask;
1710	struct dwc2_hsotg *hsotg = hs_ep->parent;
1711	int dir_in = hs_ep->dir_in;
1712	struct dwc2_hsotg_req *hs_req;
1713	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
1714
1715	if (!list_empty(&hs_ep->queue)) {
1716		hs_req = get_ep_head(hs_ep);
1717		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1718		return;
1719	}
1720	if (!hs_ep->isochronous)
1721		return;
1722
1723	if (dir_in) {
1724		dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
1725			__func__);
1726	} else {
1727		dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
1728			__func__);
1729		mask = dwc2_readl(hsotg, epmsk_reg);
1730		mask |= DOEPMSK_OUTTKNEPDISMSK;
1731		dwc2_writel(hsotg, mask, epmsk_reg);
1732	}
1733}
1734
1735/**
1736 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
1737 * @hsotg: The device state
1738 * @ctrl: USB control request
1739 */
1740static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
1741					  struct usb_ctrlrequest *ctrl)
1742{
1743	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1744	struct dwc2_hsotg_req *hs_req;
1745	bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
1746	struct dwc2_hsotg_ep *ep;
1747	int ret;
1748	bool halted;
1749	u32 recip;
1750	u32 wValue;
1751	u32 wIndex;
1752
1753	dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
1754		__func__, set ? "SET" : "CLEAR");
1755
1756	wValue = le16_to_cpu(ctrl->wValue);
1757	wIndex = le16_to_cpu(ctrl->wIndex);
1758	recip = ctrl->bRequestType & USB_RECIP_MASK;
1759
1760	switch (recip) {
1761	case USB_RECIP_DEVICE:
1762		switch (wValue) {
1763		case USB_DEVICE_REMOTE_WAKEUP:
1764			if (set)
1765				hsotg->remote_wakeup_allowed = 1;
1766			else
1767				hsotg->remote_wakeup_allowed = 0;
1768			break;
1769
1770		case USB_DEVICE_TEST_MODE:
1771			if ((wIndex & 0xff) != 0)
1772				return -EINVAL;
1773			if (!set)
1774				return -EINVAL;
1775
1776			hsotg->test_mode = wIndex >> 8;
1777			break;
1778		default:
1779			return -ENOENT;
1780		}
1781
1782		ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1783		if (ret) {
1784			dev_err(hsotg->dev,
1785				"%s: failed to send reply\n", __func__);
1786			return ret;
1787		}
1788		break;
1789
1790	case USB_RECIP_ENDPOINT:
1791		ep = ep_from_windex(hsotg, wIndex);
1792		if (!ep) {
1793			dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
1794				__func__, wIndex);
1795			return -ENOENT;
1796		}
1797
1798		switch (wValue) {
1799		case USB_ENDPOINT_HALT:
1800			halted = ep->halted;
1801
1802			dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
1803
1804			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1805			if (ret) {
1806				dev_err(hsotg->dev,
1807					"%s: failed to send reply\n", __func__);
1808				return ret;
1809			}
1810
1811			/*
1812			 * we have to complete all requests for ep if it was
1813			 * halted, and the halt was cleared by CLEAR_FEATURE
1814			 */
1815
1816			if (!set && halted) {
1817				/*
1818				 * If we have a request in progress,
1819				 * then complete it
1820				 */
1821				if (ep->req) {
1822					hs_req = ep->req;
1823					ep->req = NULL;
1824					list_del_init(&hs_req->queue);
1825					if (hs_req->req.complete) {
1826						spin_unlock(&hsotg->lock);
1827						usb_gadget_giveback_request(
1828							&ep->ep, &hs_req->req);
1829						spin_lock(&hsotg->lock);
1830					}
1831				}
1832
1833				/* If we have a pending request, then start it */
1834				if (!ep->req)
1835					dwc2_gadget_start_next_request(ep);
1836			}
1837
1838			break;
1839
1840		default:
1841			return -ENOENT;
1842		}
1843		break;
1844	default:
1845		return -ENOENT;
1846	}
1847	return 1;
1848}
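
/*
 * Illustrative sketch, not part of the driver: how the TEST_MODE case
 * above decodes wIndex. The test selector travels in the high byte and
 * the low byte must be zero, so e.g. wIndex == 0x0400 selects test
 * mode 4 (Test_Packet).
 */
static inline int example_decode_test_mode(u16 windex)
{
	if (windex & 0xff)	/* low byte must be zero */
		return -EINVAL;
	return windex >> 8;	/* selector, e.g. 4 for wIndex 0x0400 */
}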
1849
1850static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg);
1851
1852/**
1853 * dwc2_hsotg_stall_ep0 - stall ep0
1854 * @hsotg: The device state
1855 *
1856 * Set stall for ep0 as response for setup request.
1857 */
1858static void dwc2_hsotg_stall_ep0(struct dwc2_hsotg *hsotg)
1859{
1860	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1861	u32 reg;
1862	u32 ctrl;
1863
1864	dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1865	reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1866
1867	/*
1868	 * DxEPCTL_Stall will be cleared by EP once it has
1869	 * taken effect, so no need to clear later.
1870	 */
1871
1872	ctrl = dwc2_readl(hsotg, reg);
1873	ctrl |= DXEPCTL_STALL;
1874	ctrl |= DXEPCTL_CNAK;
1875	dwc2_writel(hsotg, ctrl, reg);
1876
1877	dev_dbg(hsotg->dev,
1878		"written DXEPCTL=0x%08x to %08x (DXEPCTL=0x%08x)\n",
1879		ctrl, reg, dwc2_readl(hsotg, reg));
1880
1881	/*
1882	 * complete won't be called, so we enqueue
1883	 * setup request here
1884	 */
1885	dwc2_hsotg_enqueue_setup(hsotg);
1886}
1887
1888/**
1889 * dwc2_hsotg_process_control - process a control request
1890 * @hsotg: The device state
1891 * @ctrl: The control request received
1892 *
1893 * The controller has received the SETUP phase of a control request, and
1894 * needs to work out what to do next (and whether to pass it on to the
1895 * gadget driver).
1896 */
1897static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg,
1898				       struct usb_ctrlrequest *ctrl)
1899{
1900	struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1901	int ret = 0;
1902	u32 dcfg;
1903
1904	dev_dbg(hsotg->dev,
1905		"ctrl Type=%02x, Req=%02x, V=%04x, I=%04x, L=%04x\n",
1906		ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
1907		ctrl->wIndex, ctrl->wLength);
1908
1909	if (ctrl->wLength == 0) {
1910		ep0->dir_in = 1;
1911		hsotg->ep0_state = DWC2_EP0_STATUS_IN;
1912	} else if (ctrl->bRequestType & USB_DIR_IN) {
1913		ep0->dir_in = 1;
1914		hsotg->ep0_state = DWC2_EP0_DATA_IN;
1915	} else {
1916		ep0->dir_in = 0;
1917		hsotg->ep0_state = DWC2_EP0_DATA_OUT;
1918	}
1919
1920	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1921		switch (ctrl->bRequest) {
1922		case USB_REQ_SET_ADDRESS:
1923			hsotg->connected = 1;
1924			dcfg = dwc2_readl(hsotg, DCFG);
1925			dcfg &= ~DCFG_DEVADDR_MASK;
1926			dcfg |= (le16_to_cpu(ctrl->wValue) <<
1927				 DCFG_DEVADDR_SHIFT) & DCFG_DEVADDR_MASK;
1928			dwc2_writel(hsotg, dcfg, DCFG);
1929
1930			dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1931
1932			ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
1933			return;
1934
1935		case USB_REQ_GET_STATUS:
1936			ret = dwc2_hsotg_process_req_status(hsotg, ctrl);
1937			break;
1938
1939		case USB_REQ_CLEAR_FEATURE:
1940		case USB_REQ_SET_FEATURE:
1941			ret = dwc2_hsotg_process_req_feature(hsotg, ctrl);
1942			break;
1943		}
1944	}
1945
1946	/* as a fallback, try delivering it to the driver to deal with */
1947
1948	if (ret == 0 && hsotg->driver) {
1949		spin_unlock(&hsotg->lock);
1950		ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1951		spin_lock(&hsotg->lock);
1952		if (ret < 0)
1953			dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1954	}
1955
1956	hsotg->delayed_status = false;
1957	if (ret == USB_GADGET_DELAYED_STATUS)
1958		hsotg->delayed_status = true;
1959
1960	/*
1961	 * the request either cannot be handled or is not formatted correctly,
1962	 * so respond with a STALL for the status stage to indicate failure.
1963	 */
1964
1965	if (ret < 0)
1966		dwc2_hsotg_stall_ep0(hsotg);
1967}
1968
1969/**
1970 * dwc2_hsotg_complete_setup - completion of a setup transfer
1971 * @ep: The endpoint the request was on.
1972 * @req: The request completed.
1973 *
1974 * Called on completion of any requests the driver itself submitted for
1975 * EP0 setup packets
1976 */
1977static void dwc2_hsotg_complete_setup(struct usb_ep *ep,
1978				      struct usb_request *req)
1979{
1980	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
1981	struct dwc2_hsotg *hsotg = hs_ep->parent;
1982
1983	if (req->status < 0) {
1984		dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
1985		return;
1986	}
1987
1988	spin_lock(&hsotg->lock);
1989	if (req->actual == 0)
1990		dwc2_hsotg_enqueue_setup(hsotg);
1991	else
1992		dwc2_hsotg_process_control(hsotg, req->buf);
1993	spin_unlock(&hsotg->lock);
1994}
1995
1996/**
1997 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
1998 * @hsotg: The device state.
1999 *
2000 * Enqueue a request on EP0 if necessary to receive any SETUP packets
2001 * sent from the host.
2002 */
2003static void dwc2_hsotg_enqueue_setup(struct dwc2_hsotg *hsotg)
2004{
2005	struct usb_request *req = hsotg->ctrl_req;
2006	struct dwc2_hsotg_req *hs_req = our_req(req);
2007	int ret;
2008
2009	dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
2010
2011	req->zero = 0;
2012	req->length = 8;
2013	req->buf = hsotg->ctrl_buff;
2014	req->complete = dwc2_hsotg_complete_setup;
2015
2016	if (!list_empty(&hs_req->queue)) {
2017		dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
2018		return;
2019	}
2020
2021	hsotg->eps_out[0]->dir_in = 0;
2022	hsotg->eps_out[0]->send_zlp = 0;
2023	hsotg->ep0_state = DWC2_EP0_SETUP;
2024
2025	ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
2026	if (ret < 0) {
2027		dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
2028		/*
2029		 * Don't think there's much we can do other than watch the
2030		 * driver fail.
2031		 */
2032	}
2033}
2034
2035static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
2036				   struct dwc2_hsotg_ep *hs_ep)
2037{
2038	u32 ctrl;
2039	u8 index = hs_ep->index;
2040	u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
2041	u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
2042
2043	if (hs_ep->dir_in)
2044		dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
2045			index);
2046	else
2047		dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
2048			index);
2049	if (using_desc_dma(hsotg)) {
2050		/* No specific buffer needed for ep0 ZLP */
2051		dma_addr_t dma = hs_ep->desc_list_dma;
2052
2053		if (!index)
2054			dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
2055
2056		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
2057	} else {
2058		dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
2059			    DXEPTSIZ_XFERSIZE(0),
2060			    epsiz_reg);
2061	}
2062
2063	ctrl = dwc2_readl(hsotg, epctl_reg);
2064	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
2065	ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
2066	ctrl |= DXEPCTL_USBACTEP;
2067	dwc2_writel(hsotg, ctrl, epctl_reg);
2068}
2069
2070/**
2071 * dwc2_hsotg_complete_request - complete a request given to us
2072 * @hsotg: The device state.
2073 * @hs_ep: The endpoint the request was on.
2074 * @hs_req: The request to complete.
2075 * @result: The result code (0 => Ok, otherwise errno)
2076 *
2077 * The given request has finished, so call the necessary completion
2078 * if it has one and then look to see if we can start a new request
2079 * on the endpoint.
2080 *
2081 * Note, expects the ep to already be locked as appropriate.
2082 */
2083static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
2084					struct dwc2_hsotg_ep *hs_ep,
2085				       struct dwc2_hsotg_req *hs_req,
2086				       int result)
2087{
2088	if (!hs_req) {
2089		dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
2090		return;
2091	}
2092
2093	dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
2094		hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2095
2096	/*
2097	 * only replace the status if we've not already set an error
2098	 * from a previous transaction
2099	 */
2100
2101	if (hs_req->req.status == -EINPROGRESS)
2102		hs_req->req.status = result;
2103
2104	if (using_dma(hsotg))
2105		dwc2_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
2106
2107	dwc2_hsotg_handle_unaligned_buf_complete(hsotg, hs_ep, hs_req);
2108
2109	hs_ep->req = NULL;
2110	list_del_init(&hs_req->queue);
2111
2112	/*
2113	 * call the complete request with the locks off, just in case the
2114	 * request tries to queue more work for this endpoint.
2115	 */
2116
2117	if (hs_req->req.complete) {
2118		spin_unlock(&hsotg->lock);
2119		usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
2120		spin_lock(&hsotg->lock);
2121	}
2122
2123	/* In DDMA we don't need to proceed to starting the next ISOC request */
2124	if (using_desc_dma(hsotg) && hs_ep->isochronous)
2125		return;
2126
2127	/*
2128	 * Look to see if there is anything else to do. Note, the completion
2129	 * of the previous request may have caused a new request to be started
2130	 * so be careful when doing this.
2131	 */
2132
2133	if (!hs_ep->req && result >= 0)
2134		dwc2_gadget_start_next_request(hs_ep);
2135}
2136
2137/*
2138 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
2139 * @hs_ep: The endpoint the request was on.
2140 *
2141 * Get the first request from the ep queue and determine the descriptor on
2142 * which the completion happened. SW discovers which descriptor is currently
2143 * in use by HW, adjusts dma_address and calculates the index of the completed
2144 * descriptor from the DEPDMA register; update actual length, give back to gadget.
2145 */
2146static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
2147{
2148	struct dwc2_hsotg *hsotg = hs_ep->parent;
2149	struct dwc2_hsotg_req *hs_req;
2150	struct usb_request *ureq;
2151	u32 desc_sts;
2152	u32 mask;
2153
2154	desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2155
2156	/* Process only descriptors with buffer status set to DMA done */
2157	while ((desc_sts & DEV_DMA_BUFF_STS_MASK) >>
2158		DEV_DMA_BUFF_STS_SHIFT == DEV_DMA_BUFF_STS_DMADONE) {
2159
2160		hs_req = get_ep_head(hs_ep);
2161		if (!hs_req) {
2162			dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
2163			return;
2164		}
2165		ureq = &hs_req->req;
2166
2167		/* Check completion status */
2168		if ((desc_sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT ==
2169			DEV_DMA_STS_SUCC) {
2170			mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
2171				DEV_DMA_ISOC_RX_NBYTES_MASK;
2172			ureq->actual = ureq->length - ((desc_sts & mask) >>
2173				DEV_DMA_ISOC_NBYTES_SHIFT);
2174
2175			/* Adjust actual len for ISOC Out if len is
2176			 * not aligned to 4
2177			 */
2178			if (!hs_ep->dir_in && ureq->length & 0x3)
2179				ureq->actual += 4 - (ureq->length & 0x3);
2180
2181			/* Set actual frame number for completed transfers */
2182			ureq->frame_number =
2183				(desc_sts & DEV_DMA_ISOC_FRNUM_MASK) >>
2184				DEV_DMA_ISOC_FRNUM_SHIFT;
2185		}
2186
2187		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2188
2189		hs_ep->compl_desc++;
2190		if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
2191			hs_ep->compl_desc = 0;
2192		desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2193	}
2194}
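
/*
 * Illustrative sketch, not part of the driver: the two status-field
 * checks the completion loop above performs on a DDMA descriptor
 * status word, using the DEV_DMA_* definitions from hw.h.
 */
static inline bool example_desc_dma_done(u32 sts)
{
	return ((sts & DEV_DMA_BUFF_STS_MASK) >> DEV_DMA_BUFF_STS_SHIFT) ==
		DEV_DMA_BUFF_STS_DMADONE;
}

static inline bool example_desc_success(u32 sts)
{
	return ((sts & DEV_DMA_STS_MASK) >> DEV_DMA_STS_SHIFT) ==
		DEV_DMA_STS_SUCC;
}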
2195
2196/*
2197 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
2198 * @hs_ep: The isochronous endpoint.
2199 *
2200 * If EP ISOC OUT then need to flush RX FIFO to remove source of BNA
2201 * interrupt. Reset target frame and next_desc to allow to start
2202 * ISOC's on NAK interrupt for IN direction or on OUTTKNEPDIS
2203 * interrupt for OUT direction.
2204 */
2205static void dwc2_gadget_handle_isoc_bna(struct dwc2_hsotg_ep *hs_ep)
2206{
2207	struct dwc2_hsotg *hsotg = hs_ep->parent;
2208
2209	if (!hs_ep->dir_in)
2210		dwc2_flush_rx_fifo(hsotg);
2211	dwc2_hsotg_complete_request(hsotg, hs_ep, get_ep_head(hs_ep), 0);
2212
2213	hs_ep->target_frame = TARGET_FRAME_INITIAL;
2214	hs_ep->next_desc = 0;
2215	hs_ep->compl_desc = 0;
2216}
2217
2218/**
2219 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2220 * @hsotg: The device state.
2221 * @ep_idx: The endpoint index for the data
2222 * @size: The size of data in the fifo, in bytes
2223 *
2224 * The FIFO status shows there is data to read from the FIFO for a given
2225 * endpoint, so sort out whether we need to read the data into a request
2226 * that has been made for that endpoint.
2227 */
2228static void dwc2_hsotg_rx_data(struct dwc2_hsotg *hsotg, int ep_idx, int size)
2229{
2230	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
2231	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2232	int to_read;
2233	int max_req;
2234	int read_ptr;
2235
2236	if (!hs_req) {
2237		u32 epctl = dwc2_readl(hsotg, DOEPCTL(ep_idx));
2238		int ptr;
2239
2240		dev_dbg(hsotg->dev,
2241			"%s: FIFO %d bytes on ep%d but no req (DXEPCTL=0x%08x)\n",
2242			 __func__, size, ep_idx, epctl);
2243
2244		/* dump the data from the FIFO, we've nothing we can do */
2245		for (ptr = 0; ptr < size; ptr += 4)
2246			(void)dwc2_readl(hsotg, EPFIFO(ep_idx));
2247
2248		return;
2249	}
2250
2251	to_read = size;
2252	read_ptr = hs_req->req.actual;
2253	max_req = hs_req->req.length - read_ptr;
2254
2255	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
2256		__func__, to_read, max_req, read_ptr, hs_req->req.length);
2257
2258	if (to_read > max_req) {
2259		/*
2260		 * more data appeared than we were willing
2261		 * to deal with in this request.
2262		 */
2263
2264		/* currently we don't deal with this */
2265		WARN_ON_ONCE(1);
2266	}
2267
2268	hs_ep->total_data += to_read;
2269	hs_req->req.actual += to_read;
2270	to_read = DIV_ROUND_UP(to_read, 4);
2271
2272	/*
2273	 * note, we might over-write the buffer end by 3 bytes depending on
2274	 * alignment of the data.
2275	 */
2276	dwc2_readl_rep(hsotg, EPFIFO(ep_idx),
2277		       hs_req->req.buf + read_ptr, to_read);
2278}
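
/*
 * Illustrative sketch, not part of the driver: the FIFO is drained in
 * 32-bit words, so the read above issues DIV_ROUND_UP(size, 4) word
 * reads; a 13-byte packet takes 4 reads and may write up to 3 bytes
 * past the packet's end, as the comment above warns.
 */
static inline unsigned int example_fifo_word_reads(unsigned int bytes)
{
	return DIV_ROUND_UP(bytes, 4);	/* e.g. 13 -> 4 */
}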
2279
2280/**
2281 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2282 * @hsotg: The device instance
2283 * @dir_in: If IN zlp
2284 *
2285 * Generate a zero-length IN packet request for terminating a SETUP
2286 * transaction.
2287 *
2288 * Note, since we don't write any data to the TxFIFO, it is
2289 * currently believed that we do not need to wait for any space in
2290 * the TxFIFO.
2291 */
2292static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
2293{
2294	/* eps_out[0] is used in both directions */
2295	hsotg->eps_out[0]->dir_in = dir_in;
2296	hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
2297
2298	dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
2299}
2300
2301static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
2302					    u32 epctl_reg)
2303{
2304	u32 ctrl;
2305
2306	ctrl = dwc2_readl(hsotg, epctl_reg);
2307	if (ctrl & DXEPCTL_EOFRNUM)
2308		ctrl |= DXEPCTL_SETEVENFR;
2309	else
2310		ctrl |= DXEPCTL_SETODDFR;
2311	dwc2_writel(hsotg, ctrl, epctl_reg);
2312}
2313
2314/*
2315 * dwc2_gadget_get_xfersize_ddma - get the number of bytes transferred from descs
2316 * @hs_ep: The endpoint the transfer went on
2317 *
2318 * Iterate over the endpoint's descriptor chain and get info on bytes remaining
2319 * in DMA descriptors after the transfer has completed. Used for non-isoc EPs.
2320 */
2321static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
2322{
2323	struct dwc2_hsotg *hsotg = hs_ep->parent;
2324	unsigned int bytes_rem = 0;
2325	struct dwc2_dma_desc *desc = hs_ep->desc_list;
2326	int i;
2327	u32 status;
2328
2329	if (!desc)
2330		return -EINVAL;
2331
2332	for (i = 0; i < hs_ep->desc_count; ++i) {
2333		status = desc->status;
2334		bytes_rem += status & DEV_DMA_NBYTES_MASK;
2335
2336		if (status & DEV_DMA_STS_MASK)
2337			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
2338				i, status & DEV_DMA_STS_MASK);
2339		desc++;
2340	}
2341
2342	return bytes_rem;
2343}
2344
2345/**
2346 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
2347 * @hsotg: The device instance
2348 * @epnum: The endpoint received from
2349 *
2350 * The RXFIFO has delivered an OutDone event, which means that the data
2351 * transfer for an OUT endpoint has been completed, either by a short
2352 * packet or by the finish of a transfer.
2353 */
2354static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
2355{
2356	u32 epsize = dwc2_readl(hsotg, DOEPTSIZ(epnum));
2357	struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
2358	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2359	struct usb_request *req = &hs_req->req;
2360	unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2361	int result = 0;
2362
2363	if (!hs_req) {
2364		dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
2365		return;
2366	}
2367
2368	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
2369		dev_dbg(hsotg->dev, "zlp packet received\n");
2370		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2371		dwc2_hsotg_enqueue_setup(hsotg);
2372		return;
2373	}
2374
2375	if (using_desc_dma(hsotg))
2376		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2377
2378	if (using_dma(hsotg)) {
2379		unsigned int size_done;
2380
2381		/*
2382		 * Calculate the size of the transfer by checking how much
2383		 * is left in the endpoint size register and then working it
2384		 * out from the amount we loaded for the transfer.
2385		 *
2386		 * We need to do this as DMA pointers are always 32bit aligned
2387		 * so may overshoot/undershoot the transfer.
2388		 */
2389
2390		size_done = hs_ep->size_loaded - size_left;
2391		size_done += hs_ep->last_load;
2392
2393		req->actual = size_done;
2394	}
2395
2396	/* if there is more of the request to do, schedule a new transfer */
2397	if (req->actual < req->length && size_left == 0) {
2398		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2399		return;
2400	}
2401
2402	if (req->actual < req->length && req->short_not_ok) {
2403		dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
2404			__func__, req->actual, req->length);
2405
2406		/*
2407		 * todo - what should we return here? there's no one else
2408		 * even bothering to check the status.
2409		 */
2410	}
2411
2412	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
2413	if (!using_desc_dma(hsotg) && epnum == 0 &&
2414	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2415		/* Move to STATUS IN */
2416		if (!hsotg->delayed_status)
2417			dwc2_hsotg_ep0_zlp(hsotg, true);
2418	}
2419
2420	/*
2421	 * Slave mode OUT transfers do not go through XferComplete so
2422	 * adjust the ISOC parity here.
2423	 */
2424	if (!using_dma(hsotg)) {
2425		if (hs_ep->isochronous && hs_ep->interval == 1)
2426			dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
2427		else if (hs_ep->isochronous && hs_ep->interval > 1)
2428			dwc2_gadget_incr_frame_num(hs_ep);
2429	}
2430
2431	/* Set actual frame number for completed transfers */
2432	if (!using_desc_dma(hsotg) && hs_ep->isochronous)
2433		req->frame_number = hsotg->frame_number;
2434
2435	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
2436}
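
/*
 * Illustrative sketch, not part of the driver: the DMA size arithmetic
 * used above, with hypothetical numbers. If 512 bytes were loaded, the
 * size register reports 112 bytes left and nothing was carried over
 * from a previous load, then 400 bytes were actually transferred.
 */
static inline unsigned int example_size_done(unsigned int size_loaded,
					     unsigned int size_left,
					     unsigned int last_load)
{
	return size_loaded - size_left + last_load; /* 512 - 112 + 0 = 400 */
}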
2437
2438/**
2439 * dwc2_hsotg_handle_rx - RX FIFO has data
2440 * @hsotg: The device instance
2441 *
2442 * The IRQ handler has detected that the RX FIFO has some data in it
2443 * that requires processing, so find out what is in there and do the
2444 * appropriate read.
2445 *
2446 * The RXFIFO is a true FIFO, the packets coming out are still in packet
2447 * chunks, so if you have x packets received on an endpoint you'll get x
2448 * FIFO events delivered, each with a packet's worth of data in it.
2449 *
2450 * When using DMA, we should not be processing events from the RXFIFO
2451 * as the actual data should be sent to the memory directly and we turn
2452 * on the completion interrupts to get notifications of transfer completion.
2453 */
2454static void dwc2_hsotg_handle_rx(struct dwc2_hsotg *hsotg)
2455{
2456	u32 grxstsr = dwc2_readl(hsotg, GRXSTSP);
2457	u32 epnum, status, size;
2458
2459	WARN_ON(using_dma(hsotg));
2460
2461	epnum = grxstsr & GRXSTS_EPNUM_MASK;
2462	status = grxstsr & GRXSTS_PKTSTS_MASK;
2463
2464	size = grxstsr & GRXSTS_BYTECNT_MASK;
2465	size >>= GRXSTS_BYTECNT_SHIFT;
2466
2467	dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
2468		__func__, grxstsr, size, epnum);
2469
2470	switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) {
2471	case GRXSTS_PKTSTS_GLOBALOUTNAK:
2472		dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
2473		break;
2474
2475	case GRXSTS_PKTSTS_OUTDONE:
2476		dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
2477			dwc2_hsotg_read_frameno(hsotg));
2478
2479		if (!using_dma(hsotg))
2480			dwc2_hsotg_handle_outdone(hsotg, epnum);
2481		break;
2482
2483	case GRXSTS_PKTSTS_SETUPDONE:
2484		dev_dbg(hsotg->dev,
2485			"SetupDone (Frame=0x%08x, DOEPCTL=0x%08x)\n",
2486			dwc2_hsotg_read_frameno(hsotg),
2487			dwc2_readl(hsotg, DOEPCTL(0)));
2488		/*
2489		 * Call dwc2_hsotg_handle_outdone here if it was not called from
2490		 * GRXSTS_PKTSTS_OUTDONE. That is, if the core didn't
2491		 * generate GRXSTS_PKTSTS_OUTDONE for setup packet.
2492		 */
2493		if (hsotg->ep0_state == DWC2_EP0_SETUP)
2494			dwc2_hsotg_handle_outdone(hsotg, epnum);
2495		break;
2496
2497	case GRXSTS_PKTSTS_OUTRX:
2498		dwc2_hsotg_rx_data(hsotg, epnum, size);
2499		break;
2500
2501	case GRXSTS_PKTSTS_SETUPRX:
2502		dev_dbg(hsotg->dev,
2503			"SetupRX (Frame=0x%08x, DOEPCTL=0x%08x)\n",
2504			dwc2_hsotg_read_frameno(hsotg),
2505			dwc2_readl(hsotg, DOEPCTL(0)));
2506
2507		WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
2508
2509		dwc2_hsotg_rx_data(hsotg, epnum, size);
2510		break;
2511
2512	default:
2513		dev_warn(hsotg->dev, "%s: unknown status %08x\n",
2514			 __func__, grxstsr);
2515
2516		dwc2_hsotg_dump(hsotg);
2517		break;
2518	}
2519}
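
/*
 * Illustrative sketch, not part of the driver: splitting a GRXSTSP
 * word into the three fields the handler above dispatches on.
 */
static inline void example_decode_grxstsp(u32 grxstsr, u32 *epnum,
					  u32 *pktsts, u32 *bytes)
{
	*epnum = grxstsr & GRXSTS_EPNUM_MASK;
	*pktsts = (grxstsr & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
	*bytes = (grxstsr & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
}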
2520
2521/**
2522 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
2523 * @mps: The maximum packet size in bytes.
2524 */
2525static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
2526{
2527	switch (mps) {
2528	case 64:
2529		return D0EPCTL_MPS_64;
2530	case 32:
2531		return D0EPCTL_MPS_32;
2532	case 16:
2533		return D0EPCTL_MPS_16;
2534	case 8:
2535		return D0EPCTL_MPS_8;
2536	}
2537
2538	/* bad max packet size, warn and return invalid result */
2539	WARN_ON(1);
2540	return (u32)-1;
2541}
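
/*
 * Illustrative sketch, not part of the driver: typical use of the
 * mapping above. A 64-byte EP0 max packet (the usual high/full-speed
 * value) yields D0EPCTL_MPS_64 for the DxEPCTL0 MPS field.
 */
static inline u32 example_ep0_mps_bits(void)
{
	return dwc2_hsotg_ep0_mps(64);	/* == D0EPCTL_MPS_64 */
}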
2542
2543/**
2544 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
2545 * @hsotg: The driver state.
2546 * @ep: The index number of the endpoint
2547 * @mps: The maximum packet size in bytes
2548 * @mc: The multicount value
2549 * @dir_in: True if direction is in.
2550 *
2551 * Configure the maximum packet size for the given endpoint, updating
2552 * the hardware control registers to reflect this.
2553 */
2554static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
2555					unsigned int ep, unsigned int mps,
2556					unsigned int mc, unsigned int dir_in)
2557{
2558	struct dwc2_hsotg_ep *hs_ep;
2559	u32 reg;
2560
2561	hs_ep = index_to_ep(hsotg, ep, dir_in);
2562	if (!hs_ep)
2563		return;
2564
2565	if (ep == 0) {
2566		u32 mps_bytes = mps;
2567
2568		/* EP0 is a special case */
2569		mps = dwc2_hsotg_ep0_mps(mps_bytes);
2570		if (mps > 3)
2571			goto bad_mps;
2572		hs_ep->ep.maxpacket = mps_bytes;
2573		hs_ep->mc = 1;
2574	} else {
2575		if (mps > 1024)
2576			goto bad_mps;
2577		hs_ep->mc = mc;
2578		if (mc > 3)
2579			goto bad_mps;
2580		hs_ep->ep.maxpacket = mps;
2581	}
2582
2583	if (dir_in) {
2584		reg = dwc2_readl(hsotg, DIEPCTL(ep));
2585		reg &= ~DXEPCTL_MPS_MASK;
2586		reg |= mps;
2587		dwc2_writel(hsotg, reg, DIEPCTL(ep));
2588	} else {
2589		reg = dwc2_readl(hsotg, DOEPCTL(ep));
2590		reg &= ~DXEPCTL_MPS_MASK;
2591		reg |= mps;
2592		dwc2_writel(hsotg, reg, DOEPCTL(ep));
2593	}
2594
2595	return;
2596
2597bad_mps:
2598	dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
2599}
2600
2601/**
2602 * dwc2_hsotg_txfifo_flush - flush Tx FIFO
2603 * @hsotg: The driver state
2604 * @idx: The index for the endpoint (0..15)
2605 */
2606static void dwc2_hsotg_txfifo_flush(struct dwc2_hsotg *hsotg, unsigned int idx)
2607{
2608	dwc2_writel(hsotg, GRSTCTL_TXFNUM(idx) | GRSTCTL_TXFFLSH,
2609		    GRSTCTL);
2610
2611	/* wait until the fifo is flushed */
2612	if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_TXFFLSH, 100))
2613		dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
2614			 __func__);
2615}
2616
2617/**
2618 * dwc2_hsotg_trytx - check to see if anything needs transmitting
2619 * @hsotg: The driver state
2620 * @hs_ep: The driver endpoint to check.
2621 *
2622 * Check to see if there is a request that has data to send, and if so
2623 * make an attempt to write data into the FIFO.
2624 */
2625static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg,
2626			    struct dwc2_hsotg_ep *hs_ep)
2627{
2628	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2629
2630	if (!hs_ep->dir_in || !hs_req) {
2631		/*
2632		 * if no request is enqueued, we disable interrupts
2633		 * for endpoints, except for ep0
2634		 */
2635		if (hs_ep->index != 0)
2636			dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
2637					      hs_ep->dir_in, 0);
2638		return 0;
2639	}
2640
2641	if (hs_req->req.actual < hs_req->req.length) {
2642		dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
2643			hs_ep->index);
2644		return dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
2645	}
2646
2647	return 0;
2648}
2649
2650/**
2651 * dwc2_hsotg_complete_in - complete IN transfer
2652 * @hsotg: The device state.
2653 * @hs_ep: The endpoint that has just completed.
2654 *
2655 * An IN transfer has been completed, update the transfer's state and then
2656 * call the relevant completion routines.
2657 */
2658static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
2659				   struct dwc2_hsotg_ep *hs_ep)
2660{
2661	struct dwc2_hsotg_req *hs_req = hs_ep->req;
2662	u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
2663	int size_left, size_done;
2664
2665	if (!hs_req) {
2666		dev_dbg(hsotg->dev, "XferCompl but no req\n");
2667		return;
2668	}
2669
2670	/* Finish ZLP handling for IN EP0 transactions */
2671	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
2672		dev_dbg(hsotg->dev, "zlp packet sent\n");
2673
2674		/*
2675		 * While sending the zlp for DWC2_EP0_STATUS_IN, the EP direction
2676		 * was changed to IN. Change it back to complete the OUT
2677		 * transfer request.
2678		hs_ep->dir_in = 0;
2679
2680		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2681		if (hsotg->test_mode) {
2682			int ret;
2683
2684			ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
2685			if (ret < 0) {
2686				dev_dbg(hsotg->dev, "Invalid Test #%d\n",
2687					hsotg->test_mode);
2688				dwc2_hsotg_stall_ep0(hsotg);
2689				return;
2690			}
2691		}
2692		dwc2_hsotg_enqueue_setup(hsotg);
2693		return;
2694	}
2695
2696	/*
2697	 * Calculate the size of the transfer by checking how much is left
2698	 * in the endpoint size register and then working it out from
2699	 * the amount we loaded for the transfer.
2700	 *
2701	 * We do this even for DMA, as the transfer may have incremented
2702	 * past the end of the buffer (DMA transfers are always 32bit
2703	 * aligned).
2704	 */
2705	if (using_desc_dma(hsotg)) {
2706		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
2707		if (size_left < 0)
2708			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
2709				size_left);
2710	} else {
2711		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
2712	}
2713
2714	size_done = hs_ep->size_loaded - size_left;
2715	size_done += hs_ep->last_load;
2716
2717	if (hs_req->req.actual != size_done)
2718		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
2719			__func__, hs_req->req.actual, size_done);
2720
2721	hs_req->req.actual = size_done;
2722	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
2723		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2724
2725	if (!size_left && hs_req->req.actual < hs_req->req.length) {
2726		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
2727		dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, true);
2728		return;
2729	}
2730
2731	/* Zlp for all endpoints, for ep0 only in DATA IN stage */
2732	if (hs_ep->send_zlp) {
2733		dwc2_hsotg_program_zlp(hsotg, hs_ep);
2734		hs_ep->send_zlp = 0;
2735		/* transfer will be completed on next complete interrupt */
2736		return;
2737	}
2738
2739	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
2740		/* Move to STATUS OUT */
2741		dwc2_hsotg_ep0_zlp(hsotg, false);
2742		return;
2743	}
2744
2745	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
2746}
2747
2748/**
2749 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
2750 * @hsotg: The device state.
2751 * @idx: Index of ep.
2752 * @dir_in: Endpoint direction 1-in 0-out.
2753 *
2754 * Reads the interrupts for the endpoint with the given index and direction,
2755 * by masking epint_reg with the corresponding mask.
2756 */
2757static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
2758					  unsigned int idx, int dir_in)
2759{
2760	u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
2761	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2762	u32 ints;
2763	u32 mask;
2764	u32 diepempmsk;
2765
2766	mask = dwc2_readl(hsotg, epmsk_reg);
2767	diepempmsk = dwc2_readl(hsotg, DIEPEMPMSK);
2768	mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
2769	mask |= DXEPINT_SETUP_RCVD;
2770
2771	ints = dwc2_readl(hsotg, epint_reg);
2772	ints &= mask;
2773	return ints;
2774}
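
/*
 * Illustrative sketch, not part of the driver: DIEPEMPMSK carries one
 * TxFIFO-empty enable bit per IN endpoint, which is why the mask
 * composition above tests bit idx before OR-ing in DIEPMSK_TXFIFOEMPTY.
 */
static inline bool example_txfempty_enabled(u32 diepempmsk, unsigned int idx)
{
	return (diepempmsk >> idx) & 0x1;
}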
2775
2776/**
2777 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
2778 * @hs_ep: The endpoint on which interrupt is asserted.
2779 *
2780 * This interrupt indicates that the endpoint has been disabled per the
2781 * application's request.
2782 *
2783 * For IN endpoints, flushes the txfifo; in case of BULK, clears DCTL_CGNPINNAK;
2784 * in case of ISOC, completes the current request.
2785 *
2786 * For ISOC-OUT endpoints, completes expired requests. If there is a remaining
2787 * request, starts it.
2788 */
2789static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2790{
2791	struct dwc2_hsotg *hsotg = hs_ep->parent;
2792	struct dwc2_hsotg_req *hs_req;
2793	unsigned char idx = hs_ep->index;
2794	int dir_in = hs_ep->dir_in;
2795	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
2796	int dctl = dwc2_readl(hsotg, DCTL);
2797
2798	dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
2799
2800	if (dir_in) {
2801		int epctl = dwc2_readl(hsotg, epctl_reg);
2802
2803		dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
2804
2805		if (hs_ep->isochronous) {
2806			dwc2_hsotg_complete_in(hsotg, hs_ep);
2807			return;
2808		}
2809
2810		if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
2811			int dctl = dwc2_readl(hsotg, DCTL);
2812
2813			dctl |= DCTL_CGNPINNAK;
2814			dwc2_writel(hsotg, dctl, DCTL);
2815		}
2816		return;
2817	}
2818
2819	if (dctl & DCTL_GOUTNAKSTS) {
2820		dctl |= DCTL_CGOUTNAK;
2821		dwc2_writel(hsotg, dctl, DCTL);
2822	}
2823
2824	if (!hs_ep->isochronous)
2825		return;
2826
2827	if (list_empty(&hs_ep->queue)) {
2828		dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
2829			__func__, hs_ep);
2830		return;
2831	}
2832
2833	do {
2834		hs_req = get_ep_head(hs_ep);
2835		if (hs_req)
2836			dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2837						    -ENODATA);
2838		dwc2_gadget_incr_frame_num(hs_ep);
2839		/* Update current frame number value. */
2840		hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2841	} while (dwc2_gadget_target_frame_elapsed(hs_ep));
2842
2843	dwc2_gadget_start_next_request(hs_ep);
2844}
2845
2846/**
2847 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
2848 * @ep: The endpoint on which interrupt is asserted.
2849 *
2850 * This is the starting point for an ISOC-OUT transfer; synchronization is
2851 * done with the first OUT token received from the host while the
2852 * corresponding EP is disabled.
2853 *
2854 * The device does not know which frame the first OUT token arrives in, so HW
2855 * generates OUTTKNEPDIS; on this interrupt SW calculates the next transfer frame.
2856 */
2857static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
2858{
2859	struct dwc2_hsotg *hsotg = ep->parent;
2860	int dir_in = ep->dir_in;
2861	u32 doepmsk;
2862
2863	if (dir_in || !ep->isochronous)
2864		return;
2865
2866	if (using_desc_dma(hsotg)) {
2867		if (ep->target_frame == TARGET_FRAME_INITIAL) {
2868			/* Start first ISO Out */
2869			ep->target_frame = hsotg->frame_number;
2870			dwc2_gadget_start_isoc_ddma(ep);
2871		}
2872		return;
2873	}
2874
2875	if (ep->interval > 1 &&
2876	    ep->target_frame == TARGET_FRAME_INITIAL) {
2877		u32 ctrl;
2878
2879		ep->target_frame = hsotg->frame_number;
2880		dwc2_gadget_incr_frame_num(ep);
2881
2882		ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
2883		if (ep->target_frame & 0x1)
2884			ctrl |= DXEPCTL_SETODDFR;
2885		else
2886			ctrl |= DXEPCTL_SETEVENFR;
2887
2888		dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
2889	}
2890
2891	dwc2_gadget_start_next_request(ep);
2892	doepmsk = dwc2_readl(hsotg, DOEPMSK);
2893	doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
2894	dwc2_writel(hsotg, doepmsk, DOEPMSK);
2895}
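
/*
 * Illustrative sketch, not part of the driver: the frame-parity choice
 * made above (and in dwc2_gadget_handle_nak below). An odd target
 * frame requires DXEPCTL_SETODDFR, an even one DXEPCTL_SETEVENFR.
 */
static inline u32 example_frame_parity_bit(u32 target_frame)
{
	return (target_frame & 0x1) ? DXEPCTL_SETODDFR : DXEPCTL_SETEVENFR;
}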
2896
2897/**
2898 * dwc2_gadget_handle_nak - handle NAK interrupt
2899 * @hs_ep: The endpoint on which interrupt is asserted.
2900 *
2901 * This is the starting point for an ISOC-IN transfer; synchronization is
2902 * done with the first IN token received from the host while the
2903 * corresponding EP is disabled.
2904 *
2905 * The device does not know when the first token will arrive from the host.
2906 * On its arrival HW generates two interrupts: 'IN token received while FIFO
2907 * empty' and 'NAK'. For ISOC-IN, NAK means the token arrived and a ZLP was
2908 * sent in response as there was no data in the FIFO. SW uses this interrupt
2909 * to obtain the token's frame, then calculates the next frame from the interval.
2910 */
2911static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
2912{
2913	struct dwc2_hsotg *hsotg = hs_ep->parent;
2914	int dir_in = hs_ep->dir_in;
2915
2916	if (!dir_in || !hs_ep->isochronous)
2917		return;
2918
2919	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
2920
2921		if (using_desc_dma(hsotg)) {
2922			hs_ep->target_frame = hsotg->frame_number;
2923			dwc2_gadget_incr_frame_num(hs_ep);
2924
2925			/* In service interval mode target_frame must
2926			 * be set to last (u)frame of the service interval.
2927			 */
2928			if (hsotg->params.service_interval) {
2929				/* Set target_frame to the first (u)frame of
2930				 * the service interval
2931				 */
2932				hs_ep->target_frame &= ~hs_ep->interval + 1;
2933
2934				/* Set target_frame to the last (u)frame of
2935				 * the service interval
2936				 */
2937				dwc2_gadget_incr_frame_num(hs_ep);
2938				dwc2_gadget_dec_frame_num_by_one(hs_ep);
2939			}
2940
2941			dwc2_gadget_start_isoc_ddma(hs_ep);
2942			return;
2943		}
2944
2945		hs_ep->target_frame = hsotg->frame_number;
2946		if (hs_ep->interval > 1) {
2947			u32 ctrl = dwc2_readl(hsotg,
2948					      DIEPCTL(hs_ep->index));
2949			if (hs_ep->target_frame & 0x1)
2950				ctrl |= DXEPCTL_SETODDFR;
2951			else
2952				ctrl |= DXEPCTL_SETEVENFR;
2953
2954			dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
2955		}
2956
2957		dwc2_hsotg_complete_request(hsotg, hs_ep,
2958					    get_ep_head(hs_ep), 0);
2959	}
2960
2961	if (!using_desc_dma(hsotg))
2962		dwc2_gadget_incr_frame_num(hs_ep);
2963}
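
/*
 * Illustrative sketch, not part of the driver: the service-interval
 * alignment performed above. "~hs_ep->interval + 1" is two's-complement
 * negation, so for a power-of-two interval the AND rounds the frame
 * down to the start of the interval, e.g. frame 0x2f with interval 8
 * becomes 0x28.
 */
static inline u16 example_align_frame(u16 frame, u16 interval)
{
	return frame & (~interval + 1);	/* == frame & ~(interval - 1) */
}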
2964
2965/**
2966 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
2967 * @hsotg: The driver state
2968 * @idx: The index for the endpoint (0..15)
2969 * @dir_in: Set if this is an IN endpoint
2970 *
2971 * Process and clear any interrupt pending for an individual endpoint
2972 */
2973static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
2974			     int dir_in)
2975{
2976	struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in);
2977	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
2978	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
2979	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
2980	u32 ints;
2981
2982	ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
2983
2984	/* Clear endpoint interrupts */
2985	dwc2_writel(hsotg, ints, epint_reg);
2986
2987	if (!hs_ep) {
2988		dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
2989			__func__, idx, dir_in ? "in" : "out");
2990		return;
2991	}
2992
2993	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
2994		__func__, idx, dir_in ? "in" : "out", ints);
2995
2996	/* Don't process XferCompl interrupt if it is a setup packet */
2997	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
2998		ints &= ~DXEPINT_XFERCOMPL;
2999
3000	/*
3001	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
3002	 * stage and xfercomplete was generated without SETUP phase done
3003	 * interrupt. SW should parse received setup packet only after host's
3004	 * exit from setup phase of control transfer.
3005	 */
3006	if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
3007	    hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
3008		ints &= ~DXEPINT_XFERCOMPL;
3009
3010	if (ints & DXEPINT_XFERCOMPL) {
3011		dev_dbg(hsotg->dev,
3012			"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
3013			__func__, dwc2_readl(hsotg, epctl_reg),
3014			dwc2_readl(hsotg, epsiz_reg));
3015
3016		/* In DDMA handle isochronous requests separately */
3017		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
3018			/* XferCompl set along with BNA */
3019			if (!(ints & DXEPINT_BNAINTR))
3020				dwc2_gadget_complete_isoc_request_ddma(hs_ep);
3021		} else if (dir_in) {
3022			/*
3023			 * We get OutDone from the FIFO, so we only
3024			 * need to look at completing IN requests here
3025			 * if operating slave mode
3026			 */
3027			if (hs_ep->isochronous && hs_ep->interval > 1)
3028				dwc2_gadget_incr_frame_num(hs_ep);
3029
3030			dwc2_hsotg_complete_in(hsotg, hs_ep);
3031			if (ints & DXEPINT_NAKINTRPT)
3032				ints &= ~DXEPINT_NAKINTRPT;
3033
3034			if (idx == 0 && !hs_ep->req)
3035				dwc2_hsotg_enqueue_setup(hsotg);
3036		} else if (using_dma(hsotg)) {
3037			/*
3038			 * We're using DMA, we need to fire an OutDone here
3039			 * as we ignore the RXFIFO.
3040			 */
3041			if (hs_ep->isochronous && hs_ep->interval > 1)
3042				dwc2_gadget_incr_frame_num(hs_ep);
3043
3044			dwc2_hsotg_handle_outdone(hsotg, idx);
3045		}
3046	}
3047
3048	if (ints & DXEPINT_EPDISBLD)
3049		dwc2_gadget_handle_ep_disabled(hs_ep);
3050
3051	if (ints & DXEPINT_OUTTKNEPDIS)
3052		dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
3053
3054	if (ints & DXEPINT_NAKINTRPT)
3055		dwc2_gadget_handle_nak(hs_ep);
3056
3057	if (ints & DXEPINT_AHBERR)
3058		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
3059
3060	if (ints & DXEPINT_SETUP) {  /* Setup or Timeout */
3061		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n",  __func__);
3062
3063		if (using_dma(hsotg) && idx == 0) {
3064			/*
3065			 * this is the notification we've received a
3066			 * setup packet. In non-DMA mode we'd get this
3067			 * from the RXFIFO, instead we need to process
3068			 * the setup here.
3069			 */
3070
3071			if (dir_in)
3072				WARN_ON_ONCE(1);
3073			else
3074				dwc2_hsotg_handle_outdone(hsotg, 0);
3075		}
3076	}
3077
3078	if (ints & DXEPINT_STSPHSERCVD) {
3079		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
3080
3081		/* Safety check EP0 state when STSPHSERCVD asserted */
3082		if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
3083			/* Move to STATUS IN for DDMA */
3084			if (using_desc_dma(hsotg)) {
3085				if (!hsotg->delayed_status)
3086					dwc2_hsotg_ep0_zlp(hsotg, true);
3087				else
3088				/* In case of a 3-stage Control Write with
3089				 * delayed status, when the Status IN transfer
3090				 * started before STSPHSERCVD was asserted, the
3091				 * NAKSTS bit was not cleared by CNAK in
3092				 * dwc2_hsotg_start_req(). Clear NAKSTS now to
3093				 * allow the transfer to complete.
3094				 */
3095					dwc2_set_bit(hsotg, DIEPCTL(0),
3096						     DXEPCTL_CNAK);
3097			}
3098		}
3099
3100	}
3101
3102	if (ints & DXEPINT_BACK2BACKSETUP)
3103		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
3104
3105	if (ints & DXEPINT_BNAINTR) {
3106		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
3107		if (hs_ep->isochronous)
3108			dwc2_gadget_handle_isoc_bna(hs_ep);
3109	}
3110
3111	if (dir_in && !hs_ep->isochronous) {
3112		/* not sure if this is important, but we'll clear it anyway */
3113		if (ints & DXEPINT_INTKNTXFEMP) {
3114			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
3115				__func__, idx);
3116		}
3117
3118		/* this probably means something bad is happening */
3119		if (ints & DXEPINT_INTKNEPMIS) {
3120			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
3121				 __func__, idx);
3122		}
3123
3124		/* FIFO has space or is empty (see GAHBCFG) */
3125		if (hsotg->dedicated_fifos &&
3126		    ints & DXEPINT_TXFEMP) {
3127			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
3128				__func__, idx);
3129			if (!using_dma(hsotg))
3130				dwc2_hsotg_trytx(hsotg, hs_ep);
3131		}
3132	}
3133}
3134
3135/**
3136 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
3137 * @hsotg: The device state.
3138 *
3139 * Handle updating the device settings after the enumeration phase has
3140 * been completed.
3141 */
3142static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
3143{
3144	u32 dsts = dwc2_readl(hsotg, DSTS);
3145	int ep0_mps = 0, ep_mps = 8;
3146
3147	/*
3148	 * This should signal the finish of the enumeration phase
3149	 * of the USB handshaking, so we should now know what rate
3150	 * we connected at.
3151	 */
3152
3153	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
3154
3155	/*
3156	 * note, since we're limited by the size of transfer on EP0, and
3157	 * it seems IN transfers must be an even number of packets, we do
3158	 * not advertise a 64-byte MPS on EP0.
3159	 */
3160
3161	/* catch both EnumSpd_FS and EnumSpd_FS48 */
3162	switch ((dsts & DSTS_ENUMSPD_MASK) >> DSTS_ENUMSPD_SHIFT) {
3163	case DSTS_ENUMSPD_FS:
3164	case DSTS_ENUMSPD_FS48:
3165		hsotg->gadget.speed = USB_SPEED_FULL;
3166		ep0_mps = EP0_MPS_LIMIT;
3167		ep_mps = 1023;
3168		break;
3169
3170	case DSTS_ENUMSPD_HS:
3171		hsotg->gadget.speed = USB_SPEED_HIGH;
3172		ep0_mps = EP0_MPS_LIMIT;
3173		ep_mps = 1024;
3174		break;
3175
3176	case DSTS_ENUMSPD_LS:
3177		hsotg->gadget.speed = USB_SPEED_LOW;
3178		ep0_mps = 8;
3179		ep_mps = 8;
3180		/*
3181		 * note, we don't actually support LS in this driver at the
3182		 * moment, and the documentation seems to imply that it isn't
3183		 * supported by the PHYs on some of the devices.
3184		 */
3185		break;
3186	}
3187	dev_info(hsotg->dev, "new device is %s\n",
3188		 usb_speed_string(hsotg->gadget.speed));
3189
3190	/*
3191	 * we should now know the maximum packet size for an
3192	 * endpoint, so set the endpoints to a default value.
3193	 */
3194
3195	if (ep0_mps) {
3196		int i;
3197		/* Initialize ep0 for both in and out directions */
3198		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
3199		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
3200		for (i = 1; i < hsotg->num_of_eps; i++) {
3201			if (hsotg->eps_in[i])
3202				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3203							    0, 1);
3204			if (hsotg->eps_out[i])
3205				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
3206							    0, 0);
3207		}
3208	}
3209
3210	/* ensure after enumeration our EP0 is active */
3211
3212	dwc2_hsotg_enqueue_setup(hsotg);
3213
3214	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3215		dwc2_readl(hsotg, DIEPCTL0),
3216		dwc2_readl(hsotg, DOEPCTL0));
3217}
3218
3219/**
3220 * kill_all_requests - remove all requests from the endpoint's queue
3221 * @hsotg: The device state.
3222 * @ep: The endpoint the requests may be on.
3223 * @result: The result code to use.
3224 *
3225 * Go through the requests on the given endpoint and mark them
3226 * completed with the given result code.
3227 */
3228static void kill_all_requests(struct dwc2_hsotg *hsotg,
3229			      struct dwc2_hsotg_ep *ep,
3230			      int result)
3231{
3232	unsigned int size;
3233
3234	ep->req = NULL;
3235
3236	while (!list_empty(&ep->queue)) {
3237		struct dwc2_hsotg_req *req = get_ep_head(ep);
3238
3239		dwc2_hsotg_complete_request(hsotg, ep, req, result);
3240	}
3241
3242	if (!hsotg->dedicated_fifos)
3243		return;
3244	size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
3245	if (size < ep->fifo_size)
3246		dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3247}
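
/*
 * Illustrative sketch, not part of the driver: DTXFSTS reports the
 * free TxFIFO space in 32-bit words in its low 16 bits, hence the
 * "& 0xffff) * 4" conversion above; the FIFO is flushed only when
 * fewer than fifo_size bytes are free.
 */
static inline unsigned int example_txfifo_free_bytes(u32 dtxfsts)
{
	return (dtxfsts & 0xffff) * 4;
}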
3248
3249/**
3250 * dwc2_hsotg_disconnect - disconnect service
3251 * @hsotg: The device state.
3252 *
3253 * The device has been disconnected. Remove all current
3254 * transactions and signal the gadget driver that this
3255 * has happened.
3256 */
3257void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
3258{
3259	unsigned int ep;
3260
3261	if (!hsotg->connected)
3262		return;
3263
3264	hsotg->connected = 0;
3265	hsotg->test_mode = 0;
3266
3267	/* all endpoints should be shutdown */
3268	for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3269		if (hsotg->eps_in[ep])
3270			kill_all_requests(hsotg, hsotg->eps_in[ep],
3271					  -ESHUTDOWN);
3272		if (hsotg->eps_out[ep])
3273			kill_all_requests(hsotg, hsotg->eps_out[ep],
3274					  -ESHUTDOWN);
3275	}
3276
3277	call_gadget(hsotg, disconnect);
3278	hsotg->lx_state = DWC2_L3;
3279
3280	usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
3281}
3282
3283/**
3284 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3285 * @hsotg: The device state:
3286 * @periodic: True if this is a periodic FIFO interrupt
3287 */
3288static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
3289{
3290	struct dwc2_hsotg_ep *ep;
3291	int epno, ret;
3292
3293	/* look through for any more data to transmit */
3294	for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3295		ep = index_to_ep(hsotg, epno, 1);
3296
3297		if (!ep)
3298			continue;
3299
3300		if (!ep->dir_in)
3301			continue;
3302
3303		if ((periodic && !ep->periodic) ||
3304		    (!periodic && ep->periodic))
3305			continue;
3306
3307		ret = dwc2_hsotg_trytx(hsotg, ep);
3308		if (ret < 0)
3309			break;
3310	}
3311}
3312
3313/* IRQ flags which will trigger a retry around the IRQ loop */
3314#define IRQ_RETRY_MASK (GINTSTS_NPTXFEMP | \
3315			GINTSTS_PTXFEMP |  \
3316			GINTSTS_RXFLVL)
3317
3318static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
3319/**
3320 * dwc2_hsotg_core_init_disconnected - issue softreset to the core
3321 * @hsotg: The device state
3322 * @is_usb_reset: USB reset flag
3323 *
3324 * Issue a soft reset to the core, and await the core finishing it.
3325 */
3326void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3327				       bool is_usb_reset)
3328{
3329	u32 intmsk;
3330	u32 val;
3331	u32 usbcfg;
3332	u32 dcfg = 0;
3333	int ep;
3334
3335	/* Kill any ep0 requests as controller will be reinitialized */
3336	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3337
3338	if (!is_usb_reset) {
3339		if (dwc2_core_reset(hsotg, true))
3340			return;
3341	} else {
3342		/* all endpoints should be shutdown */
3343		for (ep = 1; ep < hsotg->num_of_eps; ep++) {
3344			if (hsotg->eps_in[ep])
3345				dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3346			if (hsotg->eps_out[ep])
3347				dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3348		}
3349	}
3350
3351	/*
3352	 * we must now enable ep0 ready for host detection and then
3353	 * set configuration.
3354	 */
3355
3356	/* keep other bits untouched (so e.g. forced modes are not lost) */
3357	usbcfg = dwc2_readl(hsotg, GUSBCFG);
3358	usbcfg &= ~GUSBCFG_TOUTCAL_MASK;
3359	usbcfg |= GUSBCFG_TOUTCAL(7);
3360
3361	/* remove the HNP/SRP and set the PHY */
3362	usbcfg &= ~(GUSBCFG_SRPCAP | GUSBCFG_HNPCAP);
3363	dwc2_writel(hsotg, usbcfg, GUSBCFG);
3364
3365	dwc2_phy_init(hsotg, true);
3366
3367	dwc2_hsotg_init_fifo(hsotg);
3368
3369	if (!is_usb_reset)
3370		dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
3371
3372	dcfg |= DCFG_EPMISCNT(1);
3373
3374	switch (hsotg->params.speed) {
3375	case DWC2_SPEED_PARAM_LOW:
3376		dcfg |= DCFG_DEVSPD_LS;
3377		break;
3378	case DWC2_SPEED_PARAM_FULL:
3379		if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
3380			dcfg |= DCFG_DEVSPD_FS48;
3381		else
3382			dcfg |= DCFG_DEVSPD_FS;
3383		break;
3384	default:
3385		dcfg |= DCFG_DEVSPD_HS;
3386	}
3387
3388	if (hsotg->params.ipg_isoc_en)
3389		dcfg |= DCFG_IPG_ISOC_SUPPORDED;
3390
3391	dwc2_writel(hsotg, dcfg,  DCFG);
3392
3393	/* Clear any pending OTG interrupts */
3394	dwc2_writel(hsotg, 0xffffffff, GOTGINT);
3395
3396	/* Clear any pending interrupts */
3397	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
3398	intmsk = GINTSTS_ERLYSUSP | GINTSTS_SESSREQINT |
3399		GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
3400		GINTSTS_USBRST | GINTSTS_RESETDET |
3401		GINTSTS_ENUMDONE | GINTSTS_OTGINT |
3402		GINTSTS_USBSUSP | GINTSTS_WKUPINT |
3403		GINTSTS_LPMTRANRCVD;
3404
3405	if (!using_desc_dma(hsotg))
3406		intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
3407
3408	if (!hsotg->params.external_id_pin_ctl)
3409		intmsk |= GINTSTS_CONIDSTSCHNG;
3410
3411	dwc2_writel(hsotg, intmsk, GINTMSK);
3412
3413	if (using_dma(hsotg)) {
3414		dwc2_writel(hsotg, GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
3415			    hsotg->params.ahbcfg,
3416			    GAHBCFG);
3417
3418		/* Set DDMA mode support in the core if needed */
3419		if (using_desc_dma(hsotg))
3420			dwc2_set_bit(hsotg, DCFG, DCFG_DESCDMA_EN);
3421
3422	} else {
3423		dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
3424						(GAHBCFG_NP_TXF_EMP_LVL |
3425						 GAHBCFG_P_TXF_EMP_LVL) : 0) |
3426			    GAHBCFG_GLBL_INTR_EN, GAHBCFG);
3427	}
3428
3429	/*
3430	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
3431	 * when we have no data to transfer. Otherwise we get flooded by
3432	 * interrupts.
3433	 */
3434
3435	dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
3436		DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
3437		DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
3438		DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
3439		DIEPMSK);
3440
3441	/*
3442	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
3443	 * DMA mode we may need this and StsPhseRcvd.
3444	 */
3445	dwc2_writel(hsotg, (using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
3446		DOEPMSK_STSPHSERCVDMSK) : 0) |
3447		DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
3448		DOEPMSK_SETUPMSK,
3449		DOEPMSK);
3450
3451	/* Enable BNA interrupt for DDMA */
3452	if (using_desc_dma(hsotg)) {
3453		dwc2_set_bit(hsotg, DOEPMSK, DOEPMSK_BNAMSK);
3454		dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
3455	}
3456
3457	/* Enable Service Interval mode if supported */
3458	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3459		dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
3460
3461	dwc2_writel(hsotg, 0, DAINTMSK);
3462
3463	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3464		dwc2_readl(hsotg, DIEPCTL0),
3465		dwc2_readl(hsotg, DOEPCTL0));
3466
3467	/* enable in and out endpoint interrupts */
3468	dwc2_hsotg_en_gsint(hsotg, GINTSTS_OEPINT | GINTSTS_IEPINT);
3469
3470	/*
3471	 * Enable the RXFIFO when in slave mode, as this is how we collect
3472	 * the data. In DMA mode, we get events from the FIFO but also
3473	 * things we cannot process, so do not use it.
3474	 */
3475	if (!using_dma(hsotg))
3476		dwc2_hsotg_en_gsint(hsotg, GINTSTS_RXFLVL);
3477
3478	/* Enable interrupts for EP0 in and out */
3479	dwc2_hsotg_ctrl_epint(hsotg, 0, 0, 1);
3480	dwc2_hsotg_ctrl_epint(hsotg, 0, 1, 1);
3481
3482	if (!is_usb_reset) {
3483		dwc2_set_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
3484		udelay(10);  /* see openiboot */
3485		dwc2_clear_bit(hsotg, DCTL, DCTL_PWRONPRGDONE);
3486	}
3487
3488	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
3489
3490	/*
3491	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
3492	 * writing to the EPCTL register.
3493	 */
3494
3495	/* set to read 1 8byte packet */
3496	dwc2_writel(hsotg, DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
3497	       DXEPTSIZ_XFERSIZE(8), DOEPTSIZ0);
3498
3499	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3500	       DXEPCTL_CNAK | DXEPCTL_EPENA |
3501	       DXEPCTL_USBACTEP,
3502	       DOEPCTL0);
3503
3504	/* enable, but don't activate EP0in */
3505	dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3506	       DXEPCTL_USBACTEP, DIEPCTL0);
3507
3508	/* clear global NAKs */
3509	val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
3510	if (!is_usb_reset)
3511		val |= DCTL_SFTDISCON;
3512	dwc2_set_bit(hsotg, DCTL, val);
3513
3514	/* configure the core to support LPM */
3515	dwc2_gadget_init_lpm(hsotg);
3516
3517	/* program GREFCLK register if needed */
3518	if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3519		dwc2_gadget_program_ref_clk(hsotg);
3520
3521	/* must be at least 3ms to allow the bus to see the disconnect */
3522	mdelay(3);
3523
3524	hsotg->lx_state = DWC2_L0;
3525
3526	dwc2_hsotg_enqueue_setup(hsotg);
3527
3528	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3529		dwc2_readl(hsotg, DIEPCTL0),
3530		dwc2_readl(hsotg, DOEPCTL0));
3531}
3532
3533static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
3534{
3535	/* set the soft-disconnect bit */
3536	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
3537}
3538
3539void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
3540{
3541	/* remove the soft-disconnect and let's go */
3542	dwc2_clear_bit(hsotg, DCTL, DCTL_SFTDISCON);
3543}
3544
3545/**
3546 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
3547 * @hsotg: The device state:
3548 *
3549 * This interrupt indicates one of the following conditions occurred while
3550 * transmitting an ISOC transaction.
3551 * - Corrupted IN Token for ISOC EP.
3552 * - Packet not complete in FIFO.
3553 *
3554 * The following actions will be taken:
3555 * - Determine the EP
3556 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
3557 */
3558static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3559{
3560	struct dwc2_hsotg_ep *hs_ep;
3561	u32 epctrl;
3562	u32 daintmsk;
3563	u32 idx;
3564
3565	dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
3566
3567	daintmsk = dwc2_readl(hsotg, DAINTMSK);
3568
3569	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3570		hs_ep = hsotg->eps_in[idx];
3571		/* Process only unmasked ISOC EPs */
3572		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3573			continue;
3574
3575		epctrl = dwc2_readl(hsotg, DIEPCTL(idx));
3576		if ((epctrl & DXEPCTL_EPENA) &&
3577		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3578			epctrl |= DXEPCTL_SNAK;
3579			epctrl |= DXEPCTL_EPDIS;
3580			dwc2_writel(hsotg, epctrl, DIEPCTL(idx));
3581		}
3582	}
3583
3584	/* Clear interrupt */
3585	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOIN, GINTSTS);
3586}
3587
3588/**
3589 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
3590 * @hsotg: The device state:
3591 *
3592 * This interrupt indicates one of the following conditions occurred while
3593 * transmitting an ISOC transaction.
3594 * - Corrupted OUT Token for ISOC EP.
3595 * - Packet not complete in FIFO.
3596 *
3597 * The following actions will be taken:
3598 * - Determine the EP
3599 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
3600 */
3601static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3602{
3603	u32 gintsts;
3604	u32 gintmsk;
3605	u32 daintmsk;
3606	u32 epctrl;
3607	struct dwc2_hsotg_ep *hs_ep;
3608	int idx;
3609
3610	dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
3611
3612	daintmsk = dwc2_readl(hsotg, DAINTMSK);
3613	daintmsk >>= DAINT_OUTEP_SHIFT;
3614
3615	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3616		hs_ep = hsotg->eps_out[idx];
3617		/* Process only unmasked ISOC EPs */
3618		if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3619			continue;
3620
3621		epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
3622		if ((epctrl & DXEPCTL_EPENA) &&
3623		    dwc2_gadget_target_frame_elapsed(hs_ep)) {
3624			/* Unmask GOUTNAKEFF interrupt */
3625			gintmsk = dwc2_readl(hsotg, GINTMSK);
3626			gintmsk |= GINTSTS_GOUTNAKEFF;
3627			dwc2_writel(hsotg, gintmsk, GINTMSK);
3628
3629			gintsts = dwc2_readl(hsotg, GINTSTS);
3630			if (!(gintsts & GINTSTS_GOUTNAKEFF)) {
3631				dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
3632				break;
3633			}
3634		}
3635	}
3636
3637	/* Clear interrupt */
3638	dwc2_writel(hsotg, GINTSTS_INCOMPL_SOOUT, GINTSTS);
3639}
3640
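/*
 * Note the asymmetry with the IN case above: an OUT endpoint cannot be
 * disabled directly. The handler instead requests a global OUT NAK
 * (DCTL_SGOUTNAK) and unmasks GOUTNAKEFF; the EP is stalled/disabled
 * later in the GOUTNAKEFF interrupt handler, which is why at most one
 * EP is handled per pass (the "break" above).
 */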
3641/**
3642 * dwc2_hsotg_irq - handle device interrupt
3643 * @irq: The IRQ number triggered
3644 * @pw: The private data supplied when the handler was registered.
3645 */
3646static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
3647{
3648	struct dwc2_hsotg *hsotg = pw;
3649	int retry_count = 8;
3650	u32 gintsts;
3651	u32 gintmsk;
3652
3653	if (!dwc2_is_device_mode(hsotg))
3654		return IRQ_NONE;
3655
3656	spin_lock(&hsotg->lock);
3657irq_retry:
3658	gintsts = dwc2_readl(hsotg, GINTSTS);
3659	gintmsk = dwc2_readl(hsotg, GINTMSK);
3660
3661	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
3662		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);
3663
3664	gintsts &= gintmsk;
3665
3666	if (gintsts & GINTSTS_RESETDET) {
3667		dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
3668
3669		dwc2_writel(hsotg, GINTSTS_RESETDET, GINTSTS);
3670
3671		/* This event must be used only if the controller is suspended */
3672		if (hsotg->lx_state == DWC2_L2) {
3673			dwc2_exit_partial_power_down(hsotg, true);
3674			hsotg->lx_state = DWC2_L0;
3675		}
3676	}
3677
3678	if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) {
3679		u32 usb_status = dwc2_readl(hsotg, GOTGCTL);
3680		u32 connected = hsotg->connected;
3681
3682		dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
3683		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
3684			dwc2_readl(hsotg, GNPTXSTS));
3685
3686		dwc2_writel(hsotg, GINTSTS_USBRST, GINTSTS);
3687
3688		/* Report disconnection if it is not already done. */
3689		dwc2_hsotg_disconnect(hsotg);
3690
3691		/* Reset device address to zero */
3692		dwc2_clear_bit(hsotg, DCFG, DCFG_DEVADDR_MASK);
3693
3694		if (usb_status & GOTGCTL_BSESVLD && connected)
3695			dwc2_hsotg_core_init_disconnected(hsotg, true);
3696	}
3697
3698	if (gintsts & GINTSTS_ENUMDONE) {
3699		dwc2_writel(hsotg, GINTSTS_ENUMDONE, GINTSTS);
3700
3701		dwc2_hsotg_irq_enumdone(hsotg);
3702	}
3703
3704	if (gintsts & (GINTSTS_OEPINT | GINTSTS_IEPINT)) {
3705		u32 daint = dwc2_readl(hsotg, DAINT);
3706		u32 daintmsk = dwc2_readl(hsotg, DAINTMSK);
3707		u32 daint_out, daint_in;
3708		int ep;
3709
3710		daint &= daintmsk;
3711		daint_out = daint >> DAINT_OUTEP_SHIFT;
3712		daint_in = daint & ~(daint_out << DAINT_OUTEP_SHIFT);
3713
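		/*
		 * DAINT packs IN EP bits into [15:0] and OUT EP bits
		 * into [31:16] (DAINT_OUTEP_SHIFT == 16), so e.g. a
		 * masked value of 0x00020001 means OUT EP1 and IN EP0
		 * are both pending.
		 */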
3714		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
3715
3716		for (ep = 0; ep < hsotg->num_of_eps && daint_out;
3717						ep++, daint_out >>= 1) {
3718			if (daint_out & 1)
3719				dwc2_hsotg_epint(hsotg, ep, 0);
3720		}
3721
3722		for (ep = 0; ep < hsotg->num_of_eps && daint_in;
3723						ep++, daint_in >>= 1) {
3724			if (daint_in & 1)
3725				dwc2_hsotg_epint(hsotg, ep, 1);
3726		}
3727	}
3728
3729	/* check both FIFOs */
3730
3731	if (gintsts & GINTSTS_NPTXFEMP) {
3732		dev_dbg(hsotg->dev, "NPTxFEmp\n");
3733
3734		/*
3735		 * Disable the interrupt to stop it happening again
3736		 * unless one of these endpoint routines decides that
3737		 * it needs re-enabling
3738		 */
3739
3740		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_NPTXFEMP);
3741		dwc2_hsotg_irq_fifoempty(hsotg, false);
3742	}
3743
3744	if (gintsts & GINTSTS_PTXFEMP) {
3745		dev_dbg(hsotg->dev, "PTxFEmp\n");
3746
3747		/* See note in GINTSTS_NPTxFEmp */
3748
3749		dwc2_hsotg_disable_gsint(hsotg, GINTSTS_PTXFEMP);
3750		dwc2_hsotg_irq_fifoempty(hsotg, true);
3751	}
3752
3753	if (gintsts & GINTSTS_RXFLVL) {
3754		/*
3755		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
3756		 * we need to retry dwc2_hsotg_handle_rx if this is still
3757		 * set.
3758		 */
3759
3760		dwc2_hsotg_handle_rx(hsotg);
3761	}
3762
3763	if (gintsts & GINTSTS_ERLYSUSP) {
3764		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
3765		dwc2_writel(hsotg, GINTSTS_ERLYSUSP, GINTSTS);
3766	}
3767
3768	/*
3769	 * these next two seem to crop up occasionally, causing the core
3770	 * to shut down the USB transfer, so try clearing them and logging
3771	 * the occurrence.
3772	 */
3773
3774	if (gintsts & GINTSTS_GOUTNAKEFF) {
3775		u8 idx;
3776		u32 epctrl;
3777		u32 gintmsk;
3778		u32 daintmsk;
3779		struct dwc2_hsotg_ep *hs_ep;
3780
3781		daintmsk = dwc2_readl(hsotg, DAINTMSK);
3782		daintmsk >>= DAINT_OUTEP_SHIFT;
3783		/* Mask this interrupt */
3784		gintmsk = dwc2_readl(hsotg, GINTMSK);
3785		gintmsk &= ~GINTSTS_GOUTNAKEFF;
3786		dwc2_writel(hsotg, gintmsk, GINTMSK);
3787
3788		dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
3789		for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3790			hs_ep = hsotg->eps_out[idx];
3791			/* Proceed only for unmasked EPs */
3792			if (BIT(idx) & ~daintmsk)
3793				continue;
3794
3795			epctrl = dwc2_readl(hsotg, DOEPCTL(idx));
3796
3797			/* ISOC EPs only */
3798			if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
3799				epctrl |= DXEPCTL_SNAK;
3800				epctrl |= DXEPCTL_EPDIS;
3801				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
3802				continue;
3803			}
3804
3805			/* Non-ISOC EPs */
3806			if (hs_ep->halted) {
3807				if (!(epctrl & DXEPCTL_EPENA))
3808					epctrl |= DXEPCTL_EPENA;
3809				epctrl |= DXEPCTL_EPDIS;
3810				epctrl |= DXEPCTL_STALL;
3811				dwc2_writel(hsotg, epctrl, DOEPCTL(idx));
3812			}
3813		}
3814
3815		/* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
3816	}
3817
3818	if (gintsts & GINTSTS_GINNAKEFF) {
3819		dev_info(hsotg->dev, "GINNakEff triggered\n");
3820
3821		dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
3822
3823		dwc2_hsotg_dump(hsotg);
3824	}
3825
3826	if (gintsts & GINTSTS_INCOMPL_SOIN)
3827		dwc2_gadget_handle_incomplete_isoc_in(hsotg);
3828
3829	if (gintsts & GINTSTS_INCOMPL_SOOUT)
3830		dwc2_gadget_handle_incomplete_isoc_out(hsotg);
3831
3832	/*
3833	 * if we've had fifo events, we should try and go around the
3834	 * loop again to see if there's any point in returning yet.
3835	 */
3836
3837	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
3838		goto irq_retry;
3839
3840	/* Check WKUP_ALERT interrupt */
3841	if (hsotg->params.service_interval)
3842		dwc2_gadget_wkup_alert_handler(hsotg);
3843
3844	spin_unlock(&hsotg->lock);
3845
3846	return IRQ_HANDLED;
3847}
3848
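/**
 * dwc2_hsotg_ep_stop_xfr - stop any transfer in progress on an endpoint
 * @hsotg: The device state.
 * @hs_ep: The endpoint to stop.
 *
 * NAK the endpoint (per-EP for dedicated-FIFO/periodic IN endpoints,
 * globally otherwise), disable it, wait for the 'Endpoint Disabled'
 * interrupt, then clean up: flush the TX FIFO for IN endpoints and
 * clear any global NAK that was set.
 */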
3849static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
3850				   struct dwc2_hsotg_ep *hs_ep)
3851{
3852	u32 epctrl_reg;
3853	u32 epint_reg;
3854
3855	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
3856		DOEPCTL(hs_ep->index);
3857	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
3858		DOEPINT(hs_ep->index);
3859
3860	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
3861		hs_ep->name);
3862
3863	if (hs_ep->dir_in) {
3864		if (hsotg->dedicated_fifos || hs_ep->periodic) {
3865			dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_SNAK);
3866			/* Wait for Nak effect */
3867			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
3868						    DXEPINT_INEPNAKEFF, 100))
3869				dev_warn(hsotg->dev,
3870					 "%s: timeout DIEPINT.NAKEFF\n",
3871					 __func__);
3872		} else {
3873			dwc2_set_bit(hsotg, DCTL, DCTL_SGNPINNAK);
3874			/* Wait for Nak effect */
3875			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3876						    GINTSTS_GINNAKEFF, 100))
3877				dev_warn(hsotg->dev,
3878					 "%s: timeout GINTSTS.GINNAKEFF\n",
3879					 __func__);
3880		}
3881	} else {
3882		if (!(dwc2_readl(hsotg, GINTSTS) & GINTSTS_GOUTNAKEFF))
3883			dwc2_set_bit(hsotg, DCTL, DCTL_SGOUTNAK);
3884
3885		/* Wait for global nak to take effect */
3886		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
3887					    GINTSTS_GOUTNAKEFF, 100))
3888			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
3889				 __func__);
3890	}
3891
3892	/* Disable ep */
3893	dwc2_set_bit(hsotg, epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
3894
3895	/* Wait for ep to be disabled */
3896	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
3897		dev_warn(hsotg->dev,
3898			 "%s: timeout DOEPCTL.EPDisable\n", __func__);
3899
3900	/* Clear EPDISBLD interrupt */
3901	dwc2_set_bit(hsotg, epint_reg, DXEPINT_EPDISBLD);
3902
3903	if (hs_ep->dir_in) {
3904		unsigned short fifo_index;
3905
3906		if (hsotg->dedicated_fifos || hs_ep->periodic)
3907			fifo_index = hs_ep->fifo_index;
3908		else
3909			fifo_index = 0;
3910
3911		/* Flush TX FIFO */
3912		dwc2_flush_tx_fifo(hsotg, fifo_index);
3913
3914		/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
3915		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
3916			dwc2_set_bit(hsotg, DCTL, DCTL_CGNPINNAK);
3917
3918	} else {
3919		/* Remove global NAKs */
3920		dwc2_set_bit(hsotg, DCTL, DCTL_CGOUTNAK);
3921	}
3922}
3923
3924/**
3925 * dwc2_hsotg_ep_enable - enable the given endpoint
3926 * @ep: The USB endpoint to configure
3927 * @desc: The USB endpoint descriptor to configure with.
3928 *
3929 * This is called from the USB gadget code's usb_ep_enable().
3930 */
3931static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
3932				const struct usb_endpoint_descriptor *desc)
3933{
3934	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
3935	struct dwc2_hsotg *hsotg = hs_ep->parent;
3936	unsigned long flags;
3937	unsigned int index = hs_ep->index;
3938	u32 epctrl_reg;
3939	u32 epctrl;
3940	u32 mps;
3941	u32 mc;
3942	u32 mask;
3943	unsigned int dir_in;
3944	unsigned int i, val, size;
3945	int ret = 0;
3946	unsigned char ep_type;
3947	int desc_num;
3948
3949	dev_dbg(hsotg->dev,
3950		"%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
3951		__func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
3952		desc->wMaxPacketSize, desc->bInterval);
3953
3954	/* not to be called for EP0 */
3955	if (index == 0) {
3956		dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
3957		return -EINVAL;
3958	}
3959
3960	dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
3961	if (dir_in != hs_ep->dir_in) {
3962		dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
3963		return -EINVAL;
3964	}
3965
3966	ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
3967	mps = usb_endpoint_maxp(desc);
3968	mc = usb_endpoint_maxp_mult(desc);
3969
3970	/* ISOC IN in DDMA supports bInterval values up to 10 */
3971	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
3972	    dir_in && desc->bInterval > 10) {
3973		dev_err(hsotg->dev,
3974			"%s: ISOC IN, DDMA: bInterval>10 not supported!\n", __func__);
3975		return -EINVAL;
3976	}
3977
3978	/* High bandwidth ISOC OUT in DDMA not supported */
3979	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC &&
3980	    !dir_in && mc > 1) {
3981		dev_err(hsotg->dev,
3982			"%s: ISOC OUT, DDMA: HB not supported!\n", __func__);
3983		return -EINVAL;
3984	}
3985
3986	/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
3987
3988	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
3989	epctrl = dwc2_readl(hsotg, epctrl_reg);
3990
3991	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
3992		__func__, epctrl, epctrl_reg);
3993
3994	if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
3995		desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
3996	else
3997		desc_num = MAX_DMA_DESC_NUM_GENERIC;
3998
3999	/* Allocate DMA descriptor chain for non-ctrl endpoints */
4000	if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
4001		hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
4002			desc_num * sizeof(struct dwc2_dma_desc),
4003			&hs_ep->desc_list_dma, GFP_ATOMIC);
4004		if (!hs_ep->desc_list) {
4005			ret = -ENOMEM;
4006			goto error2;
4007		}
4008	}
4009
4010	spin_lock_irqsave(&hsotg->lock, flags);
4011
4012	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
4013	epctrl |= DXEPCTL_MPS(mps);
4014
4015	/*
4016	 * mark the endpoint as active, otherwise the core may ignore
4017	 * transactions entirely for this endpoint
4018	 */
4019	epctrl |= DXEPCTL_USBACTEP;
4020
4021	/* update the endpoint state */
4022	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
4023
4024	/* default, set to non-periodic */
4025	hs_ep->isochronous = 0;
4026	hs_ep->periodic = 0;
4027	hs_ep->halted = 0;
4028	hs_ep->interval = desc->bInterval;
4029
4030	switch (ep_type) {
4031	case USB_ENDPOINT_XFER_ISOC:
4032		epctrl |= DXEPCTL_EPTYPE_ISO;
4033		epctrl |= DXEPCTL_SETEVENFR;
4034		hs_ep->isochronous = 1;
4035		hs_ep->interval = 1 << (desc->bInterval - 1);
4036		hs_ep->target_frame = TARGET_FRAME_INITIAL;
4037		hs_ep->next_desc = 0;
4038		hs_ep->compl_desc = 0;
4039		if (dir_in) {
4040			hs_ep->periodic = 1;
4041			mask = dwc2_readl(hsotg, DIEPMSK);
4042			mask |= DIEPMSK_NAKMSK;
4043			dwc2_writel(hsotg, mask, DIEPMSK);
4044		} else {
4045			mask = dwc2_readl(hsotg, DOEPMSK);
4046			mask |= DOEPMSK_OUTTKNEPDISMSK;
4047			dwc2_writel(hsotg, mask, DOEPMSK);
4048		}
4049		break;
4050
4051	case USB_ENDPOINT_XFER_BULK:
4052		epctrl |= DXEPCTL_EPTYPE_BULK;
4053		break;
4054
4055	case USB_ENDPOINT_XFER_INT:
4056		if (dir_in)
4057			hs_ep->periodic = 1;
4058
4059		if (hsotg->gadget.speed == USB_SPEED_HIGH)
4060			hs_ep->interval = 1 << (desc->bInterval - 1);
4061
4062		epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
4063		break;
4064
4065	case USB_ENDPOINT_XFER_CONTROL:
4066		epctrl |= DXEPCTL_EPTYPE_CONTROL;
4067		break;
4068	}
4069
4070	/*
4071	 * if the hardware has dedicated fifos, we must give each IN EP
4072	 * a unique tx-fifo even if it is non-periodic.
4073	 */
4074	if (dir_in && hsotg->dedicated_fifos) {
4075		unsigned fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
4076		u32 fifo_index = 0;
4077		u32 fifo_size = UINT_MAX;
4078
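		/*
		 * Best-fit search over the unclaimed dedicated TX FIFOs:
		 * DPTXFSIZN holds the depth in 32-bit words (hence the
		 * "* 4" below to get bytes), and the smallest FIFO that
		 * still fits one maxpacket * mc burst wins, e.g. a free
		 * 1024-byte FIFO is preferred over a 2048-byte one for
		 * a 1024-byte high-speed bulk endpoint.
		 */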
4079		size = hs_ep->ep.maxpacket * hs_ep->mc;
4080		for (i = 1; i <= fifo_count; ++i) {
4081			if (hsotg->fifo_map & (1 << i))
4082				continue;
4083			val = dwc2_readl(hsotg, DPTXFSIZN(i));
4084			val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4;
4085			if (val < size)
4086				continue;
4087			/* Search for smallest acceptable fifo */
4088			if (val < fifo_size) {
4089				fifo_size = val;
4090				fifo_index = i;
4091			}
4092		}
4093		if (!fifo_index) {
4094			dev_err(hsotg->dev,
4095				"%s: No suitable fifo found\n", __func__);
4096			ret = -ENOMEM;
4097			goto error1;
4098		}
4099		epctrl &= ~(DXEPCTL_TXFNUM_LIMIT << DXEPCTL_TXFNUM_SHIFT);
4100		hsotg->fifo_map |= 1 << fifo_index;
4101		epctrl |= DXEPCTL_TXFNUM(fifo_index);
4102		hs_ep->fifo_index = fifo_index;
4103		hs_ep->fifo_size = fifo_size;
4104	}
4105
4106	/* for non control endpoints, set PID to D0 */
4107	if (index && !hs_ep->isochronous)
4108		epctrl |= DXEPCTL_SETD0PID;
4109
4110	/* WA for Full speed ISOC IN in DDMA mode.
4111	 * By clearing the NAK status of the EP, the core will send a
4112	 * ZLP in response to an IN token and assert the NAK interrupt
4113	 * relying on the TxFIFO status only.
4114	 */
4115
4116	if (hsotg->gadget.speed == USB_SPEED_FULL &&
4117	    hs_ep->isochronous && dir_in) {
4118		/* The WA applies only to core versions from 2.72a
4119		 * to 4.00a (including both). Also for FS_IOT_1.00a
4120		 * and HS_IOT_1.00a.
4121		 */
4122		u32 gsnpsid = dwc2_readl(hsotg, GSNPSID);
4123
4124		if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
4125		     gsnpsid <= DWC2_CORE_REV_4_00a) ||
4126		     gsnpsid == DWC2_FS_IOT_REV_1_00a ||
4127		     gsnpsid == DWC2_HS_IOT_REV_1_00a)
4128			epctrl |= DXEPCTL_CNAK;
4129	}
4130
4131	dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
4132		__func__, epctrl);
4133
4134	dwc2_writel(hsotg, epctrl, epctrl_reg);
4135	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
4136		__func__, dwc2_readl(hsotg, epctrl_reg));
4137
4138	/* enable the endpoint interrupt */
4139	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
4140
4141error1:
4142	spin_unlock_irqrestore(&hsotg->lock, flags);
4143
4144error2:
4145	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
4146		dmam_free_coherent(hsotg->dev, desc_num *
4147			sizeof(struct dwc2_dma_desc),
4148			hs_ep->desc_list, hs_ep->desc_list_dma);
4149		hs_ep->desc_list = NULL;
4150	}
4151
4152	return ret;
4153}
4154
4155/**
4156 * dwc2_hsotg_ep_disable - disable given endpoint
4157 * @ep: The endpoint to disable.
4158 */
4159static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
4160{
4161	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4162	struct dwc2_hsotg *hsotg = hs_ep->parent;
4163	int dir_in = hs_ep->dir_in;
4164	int index = hs_ep->index;
4165	u32 epctrl_reg;
4166	u32 ctrl;
4167
4168	dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
4169
4170	if (ep == &hsotg->eps_out[0]->ep) {
4171		dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
4172		return -EINVAL;
4173	}
4174
4175	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4176		dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
4177		return -EINVAL;
4178	}
4179
4180	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
4181
4182	ctrl = dwc2_readl(hsotg, epctrl_reg);
4183
4184	if (ctrl & DXEPCTL_EPENA)
4185		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
4186
4187	ctrl &= ~DXEPCTL_EPENA;
4188	ctrl &= ~DXEPCTL_USBACTEP;
4189	ctrl |= DXEPCTL_SNAK;
4190
4191	dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
4192	dwc2_writel(hsotg, ctrl, epctrl_reg);
4193
4194	/* disable endpoint interrupts */
4195	dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
4196
4197	/* terminate all requests with shutdown */
4198	kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
4199
4200	hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
4201	hs_ep->fifo_index = 0;
4202	hs_ep->fifo_size = 0;
4203
4204	return 0;
4205}
4206
4207static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
4208{
4209	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4210	struct dwc2_hsotg *hsotg = hs_ep->parent;
4211	unsigned long flags;
4212	int ret;
4213
4214	spin_lock_irqsave(&hsotg->lock, flags);
4215	ret = dwc2_hsotg_ep_disable(ep);
4216	spin_unlock_irqrestore(&hsotg->lock, flags);
4217	return ret;
4218}
4219
4220/**
4221 * on_list - check whether the request is queued on the given endpoint
4222 * @ep: The endpoint to check.
4223 * @test: The request to test if it is on the endpoint.
4224 */
4225static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
4226{
4227	struct dwc2_hsotg_req *req, *treq;
4228
4229	list_for_each_entry_safe(req, treq, &ep->queue, queue) {
4230		if (req == test)
4231			return true;
4232	}
4233
4234	return false;
4235}
4236
4237/**
4238 * dwc2_hsotg_ep_dequeue - dequeue the given request from an endpoint
4239 * @ep: The endpoint to dequeue.
4240 * @req: The request to be removed from a queue.
4241 */
4242static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
4243{
4244	struct dwc2_hsotg_req *hs_req = our_req(req);
4245	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4246	struct dwc2_hsotg *hs = hs_ep->parent;
4247	unsigned long flags;
4248
4249	dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
4250
4251	spin_lock_irqsave(&hs->lock, flags);
4252
4253	if (!on_list(hs_ep, hs_req)) {
4254		spin_unlock_irqrestore(&hs->lock, flags);
4255		return -EINVAL;
4256	}
4257
4258	/* Dequeue already started request */
4259	if (req == &hs_ep->req->req)
4260		dwc2_hsotg_ep_stop_xfr(hs, hs_ep);
4261
4262	dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
4263	spin_unlock_irqrestore(&hs->lock, flags);
4264
4265	return 0;
4266}
4267
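/*
 * Illustrative caller-side sketch, not from the original source: a
 * function driver cancels a previously queued request with
 *
 *	usb_ep_dequeue(ep, req);
 *
 * after which req->complete() runs with status -ECONNRESET, the status
 * passed to dwc2_hsotg_complete_request() above.
 */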
4268/**
4269 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
4270 * @ep: The endpoint to set halt.
4271 * @value: Set or unset the halt.
4272 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
4273 *       the endpoint is busy processing requests.
4274 *
4275 * We need to stall the endpoint immediately if the request comes from the
4276 * SetFeature protocol command handler.
4277 */
4278static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
4279{
4280	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4281	struct dwc2_hsotg *hs = hs_ep->parent;
4282	int index = hs_ep->index;
4283	u32 epreg;
4284	u32 epctl;
4285	u32 xfertype;
4286
4287	dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
4288
4289	if (index == 0) {
4290		if (value)
4291			dwc2_hsotg_stall_ep0(hs);
4292		else
4293			dev_warn(hs->dev,
4294				 "%s: can't clear halt on ep0\n", __func__);
4295		return 0;
4296	}
4297
4298	if (hs_ep->isochronous) {
4299		dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
4300		return -EINVAL;
4301	}
4302
4303	if (!now && value && !list_empty(&hs_ep->queue)) {
4304		dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
4305			ep->name);
4306		return -EAGAIN;
4307	}
4308
4309	if (hs_ep->dir_in) {
4310		epreg = DIEPCTL(index);
4311		epctl = dwc2_readl(hs, epreg);
4312
4313		if (value) {
4314			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
4315			if (epctl & DXEPCTL_EPENA)
4316				epctl |= DXEPCTL_EPDIS;
4317		} else {
4318			epctl &= ~DXEPCTL_STALL;
4319			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4320			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4321			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4322				epctl |= DXEPCTL_SETD0PID;
4323		}
4324		dwc2_writel(hs, epctl, epreg);
4325	} else {
4326		epreg = DOEPCTL(index);
4327		epctl = dwc2_readl(hs, epreg);
4328
4329		if (value) {
4330			if (!(dwc2_readl(hs, GINTSTS) & GINTSTS_GOUTNAKEFF))
4331				dwc2_set_bit(hs, DCTL, DCTL_SGOUTNAK);
4332			/* STALL bit will be set in GOUTNAKEFF interrupt handler */
4333		} else {
4334			epctl &= ~DXEPCTL_STALL;
4335			xfertype = epctl & DXEPCTL_EPTYPE_MASK;
4336			if (xfertype == DXEPCTL_EPTYPE_BULK ||
4337			    xfertype == DXEPCTL_EPTYPE_INTERRUPT)
4338				epctl |= DXEPCTL_SETD0PID;
4339			dwc2_writel(hs, epctl, epreg);
4340		}
4341	}
4342
4343	hs_ep->halted = value;
4344	return 0;
4345}
4346
4347/**
4348 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
4349 * @ep: The endpoint to set halt.
4350 * @value: Set or unset the halt.
4351 */
4352static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
4353{
4354	struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
4355	struct dwc2_hsotg *hs = hs_ep->parent;
4356	unsigned long flags;
4357	int ret = 0;
4358
4359	spin_lock_irqsave(&hs->lock, flags);
4360	ret = dwc2_hsotg_ep_sethalt(ep, value, false);
4361	spin_unlock_irqrestore(&hs->lock, flags);
4362
4363	return ret;
4364}
4365
4366static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
4367	.enable		= dwc2_hsotg_ep_enable,
4368	.disable	= dwc2_hsotg_ep_disable_lock,
4369	.alloc_request	= dwc2_hsotg_ep_alloc_request,
4370	.free_request	= dwc2_hsotg_ep_free_request,
4371	.queue		= dwc2_hsotg_ep_queue_lock,
4372	.dequeue	= dwc2_hsotg_ep_dequeue,
4373	.set_halt	= dwc2_hsotg_ep_sethalt_lock,
4374	/* note, don't believe we have any call for the fifo routines */
4375};
4376
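/*
 * Illustrative sketch, not from the original source: function drivers
 * reach these ops through the gadget core wrappers, e.g.
 *
 *	config_ep_by_speed(gadget, f, ep);	// selects ep->desc
 *	ret = usb_ep_enable(ep);		// -> dwc2_hsotg_ep_enable
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);// -> dwc2_hsotg_ep_queue_lock
 *	usb_ep_set_halt(ep);			// -> dwc2_hsotg_ep_sethalt_lock
 */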
4377/**
4378 * dwc2_hsotg_init - initialize the usb core
4379 * @hsotg: The driver state
4380 */
4381static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
4382{
4383	/* unmask subset of endpoint interrupts */
4384
4385	dwc2_writel(hsotg, DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
4386		    DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK,
4387		    DIEPMSK);
4388
4389	dwc2_writel(hsotg, DOEPMSK_SETUPMSK | DOEPMSK_AHBERRMSK |
4390		    DOEPMSK_EPDISBLDMSK | DOEPMSK_XFERCOMPLMSK,
4391		    DOEPMSK);
4392
4393	dwc2_writel(hsotg, 0, DAINTMSK);
4394
4395	/* Be in disconnected state until gadget is registered */
4396	dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON);
4397
4398	/* setup fifos */
4399
4400	dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4401		dwc2_readl(hsotg, GRXFSIZ),
4402		dwc2_readl(hsotg, GNPTXFSIZ));
4403
4404	dwc2_hsotg_init_fifo(hsotg);
4405
4406	if (using_dma(hsotg))
4407		dwc2_set_bit(hsotg, GAHBCFG, GAHBCFG_DMA_EN);
4408}
4409
4410/**
4411 * dwc2_hsotg_udc_start - prepare the udc for work
4412 * @gadget: The usb gadget state
4413 * @driver: The usb gadget driver
4414 *
4415 * Perform initialization to prepare udc device and driver
4416 * to work.
4417 */
4418static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
4419				struct usb_gadget_driver *driver)
4420{
4421	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4422	unsigned long flags;
4423	int ret;
4424
4425	if (!hsotg) {
4426		pr_err("%s: called with no device\n", __func__);
4427		return -ENODEV;
4428	}
4429
4430	if (!driver) {
4431		dev_err(hsotg->dev, "%s: no driver\n", __func__);
4432		return -EINVAL;
4433	}
4434
4435	if (driver->max_speed < USB_SPEED_FULL)
4436		dev_err(hsotg->dev, "%s: bad speed\n", __func__);
4437
4438	if (!driver->setup) {
4439		dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
4440		return -EINVAL;
4441	}
4442
4443	WARN_ON(hsotg->driver);
4444
4445	driver->driver.bus = NULL;
4446	hsotg->driver = driver;
4447	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
4448	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4449
4450	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
4451		ret = dwc2_lowlevel_hw_enable(hsotg);
4452		if (ret)
4453			goto err;
4454	}
4455
4456	if (!IS_ERR_OR_NULL(hsotg->uphy))
4457		otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
4458
4459	spin_lock_irqsave(&hsotg->lock, flags);
4460	if (dwc2_hw_is_device(hsotg)) {
4461		dwc2_hsotg_init(hsotg);
4462		dwc2_hsotg_core_init_disconnected(hsotg, false);
4463	}
4464
4465	hsotg->enabled = 0;
4466	spin_unlock_irqrestore(&hsotg->lock, flags);
4467
4468	gadget->sg_supported = using_desc_dma(hsotg);
4469	dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
4470
4471	return 0;
4472
4473err:
4474	hsotg->driver = NULL;
4475	return ret;
4476}
4477
4478/**
4479 * dwc2_hsotg_udc_stop - stop the udc
4480 * @gadget: The usb gadget state
4481 *
4482 * Stop the UDC hardware block and stay tuned for future transmissions
4483 */
4484static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
4485{
4486	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4487	unsigned long flags;
4488	int ep;
4489
4490	if (!hsotg)
4491		return -ENODEV;
4492
4493	/* all endpoints should be shutdown */
4494	for (ep = 1; ep < hsotg->num_of_eps; ep++) {
4495		if (hsotg->eps_in[ep])
4496			dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
4497		if (hsotg->eps_out[ep])
4498			dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
4499	}
4500
4501	spin_lock_irqsave(&hsotg->lock, flags);
4502
4503	hsotg->driver = NULL;
4504	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4505	hsotg->enabled = 0;
4506
4507	spin_unlock_irqrestore(&hsotg->lock, flags);
4508
4509	if (!IS_ERR_OR_NULL(hsotg->uphy))
4510		otg_set_peripheral(hsotg->uphy->otg, NULL);
4511
4512	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4513		dwc2_lowlevel_hw_disable(hsotg);
4514
4515	return 0;
4516}
4517
4518/**
4519 * dwc2_hsotg_gadget_getframe - read the frame number
4520 * @gadget: The usb gadget state
4521 *
4522 * Read the {micro} frame number
4523 */
4524static int dwc2_hsotg_gadget_getframe(struct usb_gadget *gadget)
4525{
4526	return dwc2_hsotg_read_frameno(to_hsotg(gadget));
4527}
4528
4529/**
4530 * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
4531 * @gadget: The usb gadget state
4532 * @is_selfpowered: Whether the device is self-powered
4533 *
4534 * Set if the device is self or bus powered.
4535 */
4536static int dwc2_hsotg_set_selfpowered(struct usb_gadget *gadget,
4537				      int is_selfpowered)
4538{
4539	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4540	unsigned long flags;
4541
4542	spin_lock_irqsave(&hsotg->lock, flags);
4543	gadget->is_selfpowered = !!is_selfpowered;
4544	spin_unlock_irqrestore(&hsotg->lock, flags);
4545
4546	return 0;
4547}
4548
4549/**
4550 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
4551 * @gadget: The usb gadget state
4552 * @is_on: Current state of the USB PHY
4553 *
4554 * Connect/Disconnect the USB PHY pullup
4555 */
4556static int dwc2_hsotg_pullup(struct usb_gadget *gadget, int is_on)
4557{
4558	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4559	unsigned long flags = 0;
4560
4561	dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
4562		hsotg->op_state);
4563
4564	/* Don't modify pullup state while in host mode */
4565	if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4566		hsotg->enabled = is_on;
4567		return 0;
4568	}
4569
4570	spin_lock_irqsave(&hsotg->lock, flags);
4571	if (is_on) {
4572		hsotg->enabled = 1;
4573		dwc2_hsotg_core_init_disconnected(hsotg, false);
4574		/* Enable ACG feature in device mode, if supported */
4575		dwc2_enable_acg(hsotg);
4576		dwc2_hsotg_core_connect(hsotg);
4577	} else {
4578		dwc2_hsotg_core_disconnect(hsotg);
4579		dwc2_hsotg_disconnect(hsotg);
4580		hsotg->enabled = 0;
4581	}
4582
4583	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4584	spin_unlock_irqrestore(&hsotg->lock, flags);
4585
4586	return 0;
4587}
4588
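/*
 * Illustrative sketch, not from the original source: the UDC core
 * reaches this op via usb_gadget_connect()/usb_gadget_disconnect():
 *
 *	usb_gadget_connect(&hsotg->gadget);	// -> dwc2_hsotg_pullup(g, 1)
 *	usb_gadget_disconnect(&hsotg->gadget);	// -> dwc2_hsotg_pullup(g, 0)
 */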
4589static int dwc2_hsotg_vbus_session(struct usb_gadget *gadget, int is_active)
4590{
4591	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4592	unsigned long flags;
4593
4594	dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
4595	spin_lock_irqsave(&hsotg->lock, flags);
4596
4597	/*
4598	 * If controller is hibernated, it must exit from power_down
4599	 * before being initialized / de-initialized
4600	 */
4601	if (hsotg->lx_state == DWC2_L2)
4602		dwc2_exit_partial_power_down(hsotg, false);
4603
4604	if (is_active) {
4605		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
4606
4607		dwc2_hsotg_core_init_disconnected(hsotg, false);
4608		if (hsotg->enabled) {
4609			/* Enable ACG feature in device mode, if supported */
4610			dwc2_enable_acg(hsotg);
4611			dwc2_hsotg_core_connect(hsotg);
4612		}
4613	} else {
4614		dwc2_hsotg_core_disconnect(hsotg);
4615		dwc2_hsotg_disconnect(hsotg);
4616	}
4617
4618	spin_unlock_irqrestore(&hsotg->lock, flags);
4619	return 0;
4620}
4621
4622/**
4623 * dwc2_hsotg_vbus_draw - report bMaxPower field
4624 * @gadget: The usb gadget state
4625 * @mA: Amount of current
4626 *
4627 * Report how much power the device may consume to the phy.
4628 */
4629static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
4630{
4631	struct dwc2_hsotg *hsotg = to_hsotg(gadget);
4632
4633	if (IS_ERR_OR_NULL(hsotg->uphy))
4634		return -ENOTSUPP;
4635	return usb_phy_set_power(hsotg->uphy, mA);
4636}
4637
4638static const struct usb_gadget_ops dwc2_hsotg_gadget_ops = {
4639	.get_frame	= dwc2_hsotg_gadget_getframe,
4640	.set_selfpowered	= dwc2_hsotg_set_selfpowered,
4641	.udc_start		= dwc2_hsotg_udc_start,
4642	.udc_stop		= dwc2_hsotg_udc_stop,
4643	.pullup                 = dwc2_hsotg_pullup,
4644	.vbus_session		= dwc2_hsotg_vbus_session,
4645	.vbus_draw		= dwc2_hsotg_vbus_draw,
4646};
4647
4648/**
4649 * dwc2_hsotg_initep - initialise a single endpoint
4650 * @hsotg: The device state.
4651 * @hs_ep: The endpoint to be initialised.
4652 * @epnum: The endpoint number
4653 * @dir_in: True if direction is in.
4654 *
4655 * Initialise the given endpoint (as part of the probe and device state
4656 * creation) to give to the gadget driver. Set up the endpoint name, any
4657 * direction information and other state that may be required.
4658 */
4659static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
4660			      struct dwc2_hsotg_ep *hs_ep,
4661				       int epnum,
4662				       bool dir_in)
4663{
4664	char *dir;
4665
4666	if (epnum == 0)
4667		dir = "";
4668	else if (dir_in)
4669		dir = "in";
4670	else
4671		dir = "out";
4672
4673	hs_ep->dir_in = dir_in;
4674	hs_ep->index = epnum;
4675
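	/* builds the name the gadget core matches on, e.g. "ep1in", "ep2out" */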
4676	snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
4677
4678	INIT_LIST_HEAD(&hs_ep->queue);
4679	INIT_LIST_HEAD(&hs_ep->ep.ep_list);
4680
4681	/* add to the list of endpoints known by the gadget driver */
4682	if (epnum)
4683		list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
4684
4685	hs_ep->parent = hsotg;
4686	hs_ep->ep.name = hs_ep->name;
4687
4688	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
4689		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
4690	else
4691		usb_ep_set_maxpacket_limit(&hs_ep->ep,
4692					   epnum ? 1024 : EP0_MPS_LIMIT);
4693	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
4694
4695	if (epnum == 0) {
4696		hs_ep->ep.caps.type_control = true;
4697	} else {
4698		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
4699			hs_ep->ep.caps.type_iso = true;
4700			hs_ep->ep.caps.type_bulk = true;
4701		}
4702		hs_ep->ep.caps.type_int = true;
4703	}
4704
4705	if (dir_in)
4706		hs_ep->ep.caps.dir_in = true;
4707	else
4708		hs_ep->ep.caps.dir_out = true;
4709
4710	/*
4711	 * if we're using dma, we need to set the next-endpoint pointer
4712	 * to be something valid.
4713	 */
4714
4715	if (using_dma(hsotg)) {
4716		u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15);
4717
4718		if (dir_in)
4719			dwc2_writel(hsotg, next, DIEPCTL(epnum));
4720		else
4721			dwc2_writel(hsotg, next, DOEPCTL(epnum));
4722	}
4723}
4724
4725/**
4726 * dwc2_hsotg_hw_cfg - read HW configuration registers
4727 * @hsotg: Programming view of the DWC_otg controller
4728 *
4729 * Read the USB core HW configuration registers
4730 */
4731static int dwc2_hsotg_hw_cfg(struct dwc2_hsotg *hsotg)
4732{
4733	u32 cfg;
4734	u32 ep_type;
4735	u32 i;
4736
4737	/* check hardware configuration */
4738
4739	hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
4740
4741	/* Add ep0 */
4742	hsotg->num_of_eps++;
4743
4744	hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
4745					sizeof(struct dwc2_hsotg_ep),
4746					GFP_KERNEL);
4747	if (!hsotg->eps_in[0])
4748		return -ENOMEM;
4749	/* Same dwc2_hsotg_ep is used in both directions for ep0 */
4750	hsotg->eps_out[0] = hsotg->eps_in[0];
4751
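	/*
	 * dev_ep_dirs (from GHWCFG1) carries 2 bits per endpoint:
	 * 0 = bidirectional, 1 = IN only, 2 = OUT only. The decode
	 * below therefore allocates eps_in[i] for values 0/1 and
	 * eps_out[i] for values 0/2.
	 */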
4752	cfg = hsotg->hw_params.dev_ep_dirs;
4753	for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
4754		ep_type = cfg & 3;
4755		/* Direction in or both */
4756		if (!(ep_type & 2)) {
4757			hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
4758				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4759			if (!hsotg->eps_in[i])
4760				return -ENOMEM;
4761		}
4762		/* Direction out or both */
4763		if (!(ep_type & 1)) {
4764			hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
4765				sizeof(struct dwc2_hsotg_ep), GFP_KERNEL);
4766			if (!hsotg->eps_out[i])
4767				return -ENOMEM;
4768		}
4769	}
4770
4771	hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
4772	hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
4773
4774	dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
4775		 hsotg->num_of_eps,
4776		 hsotg->dedicated_fifos ? "dedicated" : "shared",
4777		 hsotg->fifo_mem);
4778	return 0;
4779}
4780
4781/**
4782 * dwc2_hsotg_dump - dump state of the udc
4783 * @hsotg: Programming view of the DWC_otg controller
4784 *
4785 */
4786static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
4787{
4788#ifdef DEBUG
4789	struct device *dev = hsotg->dev;
4790	u32 val;
4791	int idx;
4792
4793	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
4794		 dwc2_readl(hsotg, DCFG), dwc2_readl(hsotg, DCTL),
4795		 dwc2_readl(hsotg, DIEPMSK));
4796
4797	dev_info(dev, "GAHBCFG=0x%08x, GHWCFG1=0x%08x\n",
4798		 dwc2_readl(hsotg, GAHBCFG), dwc2_readl(hsotg, GHWCFG1));
4799
4800	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4801		 dwc2_readl(hsotg, GRXFSIZ), dwc2_readl(hsotg, GNPTXFSIZ));
4802
4803	/* show periodic fifo settings */
4804
4805	for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4806		val = dwc2_readl(hsotg, DPTXFSIZN(idx));
4807		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
4808			 val >> FIFOSIZE_DEPTH_SHIFT,
4809			 val & FIFOSIZE_STARTADDR_MASK);
4810	}
4811
4812	for (idx = 0; idx < hsotg->num_of_eps; idx++) {
4813		dev_info(dev,
4814			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
4815			 dwc2_readl(hsotg, DIEPCTL(idx)),
4816			 dwc2_readl(hsotg, DIEPTSIZ(idx)),
4817			 dwc2_readl(hsotg, DIEPDMA(idx)));
4818
4819		val = dwc2_readl(hsotg, DOEPCTL(idx));
4820		dev_info(dev,
4821			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
4822			 idx, dwc2_readl(hsotg, DOEPCTL(idx)),
4823			 dwc2_readl(hsotg, DOEPTSIZ(idx)),
4824			 dwc2_readl(hsotg, DOEPDMA(idx)));
4825	}
4826
4827	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
4828		 dwc2_readl(hsotg, DVBUSDIS), dwc2_readl(hsotg, DVBUSPULSE));
4829#endif
4830}
4831
4832/**
4833 * dwc2_gadget_init - init function for gadget
4834 * @hsotg: Programming view of the DWC_otg controller
4835 *
4836 */
4837int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
4838{
4839	struct device *dev = hsotg->dev;
4840	int epnum;
4841	int ret;
4842
4843	/* Dump fifo information */
4844	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
4845		hsotg->params.g_np_tx_fifo_size);
4846	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
4847
4848	hsotg->gadget.max_speed = USB_SPEED_HIGH;
4849	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
4850	hsotg->gadget.name = dev_name(dev);
4851	hsotg->remote_wakeup_allowed = 0;
4852
4853	if (hsotg->params.lpm)
4854		hsotg->gadget.lpm_capable = true;
4855
4856	if (hsotg->dr_mode == USB_DR_MODE_OTG)
4857		hsotg->gadget.is_otg = 1;
4858	else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4859		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
4860
4861	ret = dwc2_hsotg_hw_cfg(hsotg);
4862	if (ret) {
4863		dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
4864		return ret;
4865	}
4866
4867	hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
4868			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
4869	if (!hsotg->ctrl_buff)
4870		return -ENOMEM;
4871
4872	hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
4873			DWC2_CTRL_BUFF_SIZE, GFP_KERNEL);
4874	if (!hsotg->ep0_buff)
4875		return -ENOMEM;
4876
4877	if (using_desc_dma(hsotg)) {
4878		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
4879		if (ret < 0)
4880			return ret;
4881	}
4882
4883	ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
4884			       IRQF_SHARED, dev_name(hsotg->dev), hsotg);
4885	if (ret < 0) {
4886		dev_err(dev, "cannot claim IRQ for gadget\n");
4887		return ret;
4888	}
4889
4890	/* hsotg->num_of_eps holds number of EPs other than ep0 */
4891
4892	if (hsotg->num_of_eps == 0) {
4893		dev_err(dev, "wrong number of EPs (zero)\n");
4894		return -EINVAL;
4895	}
4896
4897	/* setup endpoint information */
4898
4899	INIT_LIST_HEAD(&hsotg->gadget.ep_list);
4900	hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
4901
4902	/* allocate EP0 request */
4903
4904	hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
4905						     GFP_KERNEL);
4906	if (!hsotg->ctrl_req) {
4907		dev_err(dev, "failed to allocate ctrl req\n");
4908		return -ENOMEM;
4909	}
4910
4911	/* initialise the endpoints now the core has been initialised */
4912	for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
4913		if (hsotg->eps_in[epnum])
4914			dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
4915					  epnum, 1);
4916		if (hsotg->eps_out[epnum])
4917			dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
4918					  epnum, 0);
4919	}
4920
4921	dwc2_hsotg_dump(hsotg);
4922
4923	return 0;
4924}
4925
4926/**
4927 * dwc2_hsotg_remove - remove function for hsotg driver
4928 * @hsotg: Programming view of the DWC_otg controller
4929 *
4930 */
4931int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
4932{
4933	usb_del_gadget_udc(&hsotg->gadget);
4934	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
4935
4936	return 0;
4937}
4938
4939int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
4940{
4941	unsigned long flags;
4942
4943	if (hsotg->lx_state != DWC2_L0)
4944		return 0;
4945
4946	if (hsotg->driver) {
4947		int ep;
4948
4949		dev_info(hsotg->dev, "suspending usb gadget %s\n",
4950			 hsotg->driver->driver.name);
4951
4952		spin_lock_irqsave(&hsotg->lock, flags);
4953		if (hsotg->enabled)
4954			dwc2_hsotg_core_disconnect(hsotg);
4955		dwc2_hsotg_disconnect(hsotg);
4956		hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4957		spin_unlock_irqrestore(&hsotg->lock, flags);
4958
4959		for (ep = 0; ep < hsotg->num_of_eps; ep++) {
4960			if (hsotg->eps_in[ep])
4961				dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
4962			if (hsotg->eps_out[ep])
4963				dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
4964		}
4965	}
4966
4967	return 0;
4968}
4969
4970int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
4971{
4972	unsigned long flags;
4973
4974	if (hsotg->lx_state == DWC2_L2)
4975		return 0;
4976
4977	if (hsotg->driver) {
4978		dev_info(hsotg->dev, "resuming usb gadget %s\n",
4979			 hsotg->driver->driver.name);
4980
4981		spin_lock_irqsave(&hsotg->lock, flags);
4982		dwc2_hsotg_core_init_disconnected(hsotg, false);
4983		if (hsotg->enabled) {
4984			/* Enable ACG feature in device mode, if supported */
4985			dwc2_enable_acg(hsotg);
4986			dwc2_hsotg_core_connect(hsotg);
4987		}
4988		spin_unlock_irqrestore(&hsotg->lock, flags);
4989	}
4990
4991	return 0;
4992}
4993
4994/**
4995 * dwc2_backup_device_registers() - Backup controller device registers.
4996 * When suspending the USB bus, registers need to be backed up
4997 * if controller power is disabled once suspended.
4998 *
4999 * @hsotg: Programming view of the DWC_otg controller
5000 */
5001int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
5002{
5003	struct dwc2_dregs_backup *dr;
5004	int i;
5005
5006	dev_dbg(hsotg->dev, "%s\n", __func__);
5007
5008	/* Backup dev regs */
5009	dr = &hsotg->dr_backup;
5010
5011	dr->dcfg = dwc2_readl(hsotg, DCFG);
5012	dr->dctl = dwc2_readl(hsotg, DCTL);
5013	dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
5014	dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
5015	dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
5016
5017	for (i = 0; i < hsotg->num_of_eps; i++) {
5018		/* Backup IN EPs */
5019		dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
5020
5021		/* Ensure DATA PID is correctly configured */
5022		if (dr->diepctl[i] & DXEPCTL_DPID)
5023			dr->diepctl[i] |= DXEPCTL_SETD1PID;
5024		else
5025			dr->diepctl[i] |= DXEPCTL_SETD0PID;
5026
5027		dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
5028		dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
5029
5030		/* Backup OUT EPs */
5031		dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
5032
5033		/* Ensure DATA PID is correctly configured */
5034		if (dr->doepctl[i] & DXEPCTL_DPID)
5035			dr->doepctl[i] |= DXEPCTL_SETD1PID;
5036		else
5037			dr->doepctl[i] |= DXEPCTL_SETD0PID;
5038
5039		dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
5040		dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
5041		dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
5042	}
5043	dr->valid = true;
5044	return 0;
5045}
5046
5047/**
5048 * dwc2_restore_device_registers() - Restore controller device registers.
5049 * When resuming the USB bus, device registers need to be restored
5050 * if controller power was disabled.
5051 *
5052 * @hsotg: Programming view of the DWC_otg controller
5053 * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
5054 *
5055 * Return: 0 if successful, negative error code otherwise
5056 */
5057int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
5058{
5059	struct dwc2_dregs_backup *dr;
5060	int i;
5061
5062	dev_dbg(hsotg->dev, "%s\n", __func__);
5063
5064	/* Restore dev regs */
5065	dr = &hsotg->dr_backup;
5066	if (!dr->valid) {
5067		dev_err(hsotg->dev, "%s: no device registers to restore\n",
5068			__func__);
5069		return -EINVAL;
5070	}
5071	dr->valid = false;
5072
5073	if (!remote_wakeup)
5074		dwc2_writel(hsotg, dr->dctl, DCTL);
5075
5076	dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
5077	dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
5078	dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
5079
5080	for (i = 0; i < hsotg->num_of_eps; i++) {
5081		/* Restore IN EPs */
5082		dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
5083		dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
5084		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
5085		/* WA for enabled IN EPs in DDMA mode. On entering
5086		 * hibernation a wrong value is read and saved from DIEPDMAx;
5087		 * as a result a BNA interrupt is asserted on hibernation
5088		 * exit when restoring from the saved area.
5089		 */
5090		if (hsotg->params.g_dma_desc &&
5091		    (dr->diepctl[i] & DXEPCTL_EPENA))
5092			dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
5093		dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
5094		dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
5095		/* Restore OUT EPs */
5096		dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
5097		/* WA for enabled OUT EPs in DDMA mode. On entering
5098		 * hibernation a wrong value is read and saved from DOEPDMAx;
5099		 * as a result a BNA interrupt is asserted on hibernation
5100		 * exit when restoring from the saved area.
5101		 */
5102		if (hsotg->params.g_dma_desc &&
5103		    (dr->doepctl[i] & DXEPCTL_EPENA))
5104			dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
5105		dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
5106		dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
5107	}
5108
5109	return 0;
5110}
5111
5112/**
5113 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
5114 *
5115 * @hsotg: Programming view of DWC_otg controller
5116 *
5117 */
5118void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
5119{
5120	u32 val;
5121
5122	if (!hsotg->params.lpm)
5123		return;
5124
5125	val = GLPMCFG_LPMCAP | GLPMCFG_APPL1RES;
5126	val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
5127	val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
5128	val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
5129	val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
5130	val |= GLPMCFG_LPM_REJECT_CTRL_CONTROL;
5131	val |= GLPMCFG_LPM_ACCEPT_CTRL_ISOC;
5132	dwc2_writel(hsotg, val, GLPMCFG);
5133	dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
5134
5135	/* Unmask WKUP_ALERT Interrupt */
5136	if (hsotg->params.service_interval)
5137		dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
5138}
5139
5140/**
5141 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
5142 *
5143 * @hsotg: Programming view of DWC_otg controller
5144 *
5145 */
5146void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
5147{
5148	u32 val = 0;
5149
5150	val |= GREFCLK_REF_CLK_MODE;
5151	val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
5152	val |= hsotg->params.sof_cnt_wkup_alert <<
5153	       GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
5154
5155	dwc2_writel(hsotg, val, GREFCLK);
5156	dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
5157}
5158
5159/**
5160 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
5161 *
5162 * @hsotg: Programming view of the DWC_otg controller
5163 *
5164 * Return non-zero if the controller failed to enter hibernation.
5165 */
5166int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
5167{
5168	u32 gpwrdn;
5169	int ret = 0;
5170
5171	/* Change to L2(suspend) state */
5172	hsotg->lx_state = DWC2_L2;
5173	dev_dbg(hsotg->dev, "Start of hibernation completed\n");
5174	ret = dwc2_backup_global_registers(hsotg);
5175	if (ret) {
5176		dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5177			__func__);
5178		return ret;
5179	}
5180	ret = dwc2_backup_device_registers(hsotg);
5181	if (ret) {
5182		dev_err(hsotg->dev, "%s: failed to backup device registers\n",
5183			__func__);
5184		return ret;
5185	}
5186
5187	gpwrdn = GPWRDN_PWRDNRSTN;
5188	gpwrdn |= GPWRDN_PMUACTV;
5189	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5190	udelay(10);
5191
5192	/* Set flag to indicate that we are in hibernation */
5193	hsotg->hibernated = 1;
5194
5195	/* Enable interrupts from wake up logic */
5196	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5197	gpwrdn |= GPWRDN_PMUINTSEL;
5198	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5199	udelay(10);
5200
5201	/* Unmask device mode interrupts in GPWRDN */
5202	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5203	gpwrdn |= GPWRDN_RST_DET_MSK;
5204	gpwrdn |= GPWRDN_LNSTSCHG_MSK;
5205	gpwrdn |= GPWRDN_STS_CHGINT_MSK;
5206	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5207	udelay(10);
5208
5209	/* Enable Power Down Clamp */
5210	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5211	gpwrdn |= GPWRDN_PWRDNCLMP;
5212	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5213	udelay(10);
5214
5215	/* Switch off VDD */
5216	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5217	gpwrdn |= GPWRDN_PWRDNSWTCH;
5218	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5219	udelay(10);
5220
5221	/* Save gpwrdn register for further usage if stschng interrupt */
5222	hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
5223	dev_dbg(hsotg->dev, "Hibernation completed\n");
5224
5225	return ret;
5226}
5227
5228/**
5229 * dwc2_gadget_exit_hibernation()
5230 * This function is for exiting from device mode hibernation by host-initiated
5231 * resume/reset and device-initiated remote-wakeup.
5232 *
5233 * @hsotg: Programming view of the DWC_otg controller
5234 * @rem_wakeup: indicates whether resume is initiated by Device or Host.
5235 * @reset: indicates whether resume is initiated by Reset.
5236 *
5237 * Return non-zero if the controller failed to exit hibernation.
5238 */
5239int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
5240				 int rem_wakeup, int reset)
5241{
5242	u32 pcgcctl;
5243	u32 gpwrdn;
5244	u32 dctl;
5245	int ret = 0;
5246	struct dwc2_gregs_backup *gr;
5247	struct dwc2_dregs_backup *dr;
5248
5249	gr = &hsotg->gr_backup;
5250	dr = &hsotg->dr_backup;
5251
5252	if (!hsotg->hibernated) {
5253		dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
5254		return 1;
5255	}
5256	dev_dbg(hsotg->dev,
5257		"%s: called with rem_wakeup = %d reset = %d\n",
5258		__func__, rem_wakeup, reset);
5259
5260	dwc2_hib_restore_common(hsotg, rem_wakeup, 0);
5261
5262	if (!reset) {
5263		/* Clear all pending interrupts */
5264		dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5265	}
5266
5267	/* De-assert Restore */
5268	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5269	gpwrdn &= ~GPWRDN_RESTORE;
5270	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5271	udelay(10);
5272
5273	if (!rem_wakeup) {
5274		pcgcctl = dwc2_readl(hsotg, PCGCTL);
5275		pcgcctl &= ~PCGCTL_RSTPDWNMODULE;
5276		dwc2_writel(hsotg, pcgcctl, PCGCTL);
5277	}
5278
5279	/* Restore GUSBCFG, DCFG and DCTL */
5280	dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
5281	dwc2_writel(hsotg, dr->dcfg, DCFG);
5282	dwc2_writel(hsotg, dr->dctl, DCTL);
5283
5284	/* De-assert Wakeup Logic */
5285	gpwrdn = dwc2_readl(hsotg, GPWRDN);
5286	gpwrdn &= ~GPWRDN_PMUACTV;
5287	dwc2_writel(hsotg, gpwrdn, GPWRDN);
5288
5289	if (rem_wakeup) {
5290		udelay(10);
5291		/* Start Remote Wakeup Signaling */
5292		dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
5293	} else {
5294		udelay(50);
5295		/* Set Device programming done bit */
5296		dctl = dwc2_readl(hsotg, DCTL);
5297		dctl |= DCTL_PWRONPRGDONE;
5298		dwc2_writel(hsotg, dctl, DCTL);
5299	}
5300	/* Wait for interrupts which must be cleared */
5301	mdelay(2);
5302	/* Clear all pending interrupts */
5303	dwc2_writel(hsotg, 0xffffffff, GINTSTS);
5304
5305	/* Restore global registers */
5306	ret = dwc2_restore_global_registers(hsotg);
5307	if (ret) {
5308		dev_err(hsotg->dev, "%s: failed to restore registers\n",
5309			__func__);
5310		return ret;
5311	}
5312
5313	/* Restore device registers */
5314	ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
5315	if (ret) {
5316		dev_err(hsotg->dev, "%s: failed to restore device registers\n",
5317			__func__);
5318		return ret;
5319	}
5320
5321	if (rem_wakeup) {
5322		mdelay(10);
5323		dctl = dwc2_readl(hsotg, DCTL);
5324		dctl &= ~DCTL_RMTWKUPSIG;
5325		dwc2_writel(hsotg, dctl, DCTL);
5326	}
5327
5328	hsotg->hibernated = 0;
5329	hsotg->lx_state = DWC2_L0;
5330	dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");
5331
5332	return ret;
5333}