   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
   4 *
   5 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
   6 * Copyright (C) 2012 Broadcom Corporation
   7 */
   8
   9#include <linux/bitops.h>
  10#include <linux/bug.h>
  11#include <linux/clk.h>
  12#include <linux/compiler.h>
  13#include <linux/debugfs.h>
  14#include <linux/delay.h>
  15#include <linux/device.h>
  16#include <linux/dma-mapping.h>
  17#include <linux/errno.h>
  18#include <linux/interrupt.h>
  19#include <linux/ioport.h>
  20#include <linux/kernel.h>
  21#include <linux/list.h>
  22#include <linux/module.h>
  23#include <linux/moduleparam.h>
  24#include <linux/platform_device.h>
  25#include <linux/sched.h>
  26#include <linux/seq_file.h>
  27#include <linux/slab.h>
  28#include <linux/timer.h>
  29#include <linux/usb.h>
  30#include <linux/usb/ch9.h>
  31#include <linux/usb/gadget.h>
  32#include <linux/workqueue.h>
  33
  34#include <bcm63xx_cpu.h>
  35#include <bcm63xx_iudma.h>
  36#include <bcm63xx_dev_usb_usbd.h>
  37#include <bcm63xx_io.h>
  38#include <bcm63xx_regs.h>
  39
  40#define DRV_MODULE_NAME		"bcm63xx_udc"
  41
  42static const char bcm63xx_ep0name[] = "ep0";
  43
  44static const struct {
  45	const char *name;
  46	const struct usb_ep_caps caps;
  47} bcm63xx_ep_info[] = {
  48#define EP_INFO(_name, _caps) \
  49	{ \
  50		.name = _name, \
  51		.caps = _caps, \
  52	}
  53
  54	EP_INFO(bcm63xx_ep0name,
  55		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
  56	EP_INFO("ep1in-bulk",
  57		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
  58	EP_INFO("ep2out-bulk",
  59		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
  60	EP_INFO("ep3in-int",
  61		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
  62	EP_INFO("ep4out-int",
  63		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
  64
  65#undef EP_INFO
  66};
  67
  68static bool use_fullspeed;
  69module_param(use_fullspeed, bool, S_IRUGO);
  70MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
  71
  72/*
  73 * RX IRQ coalescing options:
  74 *
  75 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
  76 * driver is able to pass the "testusb" suite and recover from conditions like:
  77 *
  78 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
  79 *   2) Host sends 512 bytes of data
  80 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
  81 *   4) Device shuts down the endpoint and cancels the RX transaction
  82 *
  83 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
  84 * considerably fewer IRQs, but error recovery is less robust.  Does not
  85 * reliably pass "testusb".
  86 *
  87 * TX always uses coalescing, because we can cancel partially complete TX
  88 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
  89 * this on RX.
  90 */
  91static bool irq_coalesce;
  92module_param(irq_coalesce, bool, S_IRUGO);
  93MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
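
/*
 * Worked example of the trade-off described above (illustrative only):
 * a 2048-byte bulk OUT transfer at high speed (512-byte max packet)
 * arrives as four DATAx packets, so the default setting takes four RX
 * IRQs for the transfer while irq_coalesce=1 takes a single IRQ.
 */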
  94
  95#define BCM63XX_NUM_EP			5
  96#define BCM63XX_NUM_IUDMA		6
  97#define BCM63XX_NUM_FIFO_PAIRS		3
  98
  99#define IUDMA_RESET_TIMEOUT_US		10000
 100
 101#define IUDMA_EP0_RXCHAN		0
 102#define IUDMA_EP0_TXCHAN		1
 103
 104#define IUDMA_MAX_FRAGMENT		2048
 105#define BCM63XX_MAX_CTRL_PKT		64
 106
 107#define BCMEP_CTRL			0x00
 108#define BCMEP_ISOC			0x01
 109#define BCMEP_BULK			0x02
 110#define BCMEP_INTR			0x03
 111
 112#define BCMEP_OUT			0x00
 113#define BCMEP_IN			0x01
 114
 115#define BCM63XX_SPD_FULL		1
 116#define BCM63XX_SPD_HIGH		0
 117
 118#define IUDMA_DMAC_OFFSET		0x200
 119#define IUDMA_DMAS_OFFSET		0x400
 120
 121enum bcm63xx_ep0_state {
 122	EP0_REQUEUE,
 123	EP0_IDLE,
 124	EP0_IN_DATA_PHASE_SETUP,
 125	EP0_IN_DATA_PHASE_COMPLETE,
 126	EP0_OUT_DATA_PHASE_SETUP,
 127	EP0_OUT_DATA_PHASE_COMPLETE,
 128	EP0_OUT_STATUS_PHASE,
 129	EP0_IN_FAKE_STATUS_PHASE,
 130	EP0_SHUTDOWN,
 131};
 132
 133static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
 134	"REQUEUE",
 135	"IDLE",
 136	"IN_DATA_PHASE_SETUP",
 137	"IN_DATA_PHASE_COMPLETE",
 138	"OUT_DATA_PHASE_SETUP",
 139	"OUT_DATA_PHASE_COMPLETE",
 140	"OUT_STATUS_PHASE",
 141	"IN_FAKE_STATUS_PHASE",
 142	"SHUTDOWN",
 143};
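
/*
 * Sketch of the usual state sequence for a host-initiated control-IN
 * transaction, as driven by bcm63xx_ep0_one_round() below (assumes no
 * reset/shutdown arrives in the middle):
 *
 *   EP0_REQUEUE                 queue an RX BD to catch the SETUP packet
 *   EP0_IDLE                    SETUP arrives; gadget ->setup() is called
 *   EP0_IN_DATA_PHASE_SETUP     wait for the gadget's reply (ep0_reply)
 *   EP0_IN_DATA_PHASE_COMPLETE  reply is in flight on IUDMA_EP0_TXCHAN
 *   EP0_REQUEUE                 TX finished; rearm for the next SETUP
 */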
 144
 145/**
 146 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 147 * @ep_num: USB endpoint number.
 148 * @n_bds: Number of buffer descriptors in the ring.
 149 * @ep_type: Endpoint type (control, bulk, interrupt).
 150 * @dir: Direction (in, out).
 151 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 152 * @max_pkt_hs: Maximum packet size in high speed mode.
 153 * @max_pkt_fs: Maximum packet size in full speed mode.
 154 */
 155struct iudma_ch_cfg {
 156	int				ep_num;
 157	int				n_bds;
 158	int				ep_type;
 159	int				dir;
 160	int				n_fifo_slots;
 161	int				max_pkt_hs;
 162	int				max_pkt_fs;
 163};
 164
 165static const struct iudma_ch_cfg iudma_defaults[] = {
 166
 167	/* This controller was designed to support a CDC/RNDIS application.
 168	   It may be possible to reconfigure some of the endpoints, but
 169	   the hardware limitations (FIFO sizing and number of DMA channels)
 170	   may significantly impact flexibility and/or stability.  Change
 171	   these values at your own risk.
 172
 173	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
 174	idx      |  n_bds     |         dir       |  max_pkt_hs  |
 175	 |       |    |       |          |        |      |       |       */
 176	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
 177	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
 178	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
 179	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
 180	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
 181	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
 182};
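
/*
 * Illustrative reading of the table above: channels are used in RX/TX
 * pairs, so channels 2 and 3 form pair 1 (init_sel value 1) and carry
 * ep2out-bulk and ep1in-bulk with 16 BDs each; both use 512-byte
 * packets at high speed and drop to 64 bytes at full speed.
 */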
 183
 184struct bcm63xx_udc;
 185
 186/**
 187 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 188 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 189 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 190 * @enabled: Whether bcm63xx_ep_enable() has been called.
 191 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 192 * @is_tx: true for TX, false for RX.
 193 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 194 * @udc: Reference to the device controller.
 195 * @read_bd: Next buffer descriptor to reap from the hardware.
 196 * @write_bd: Next BD available for a new packet.
 197 * @end_bd: Points to the final BD in the ring.
 198 * @n_bds_used: Number of BD entries currently occupied.
 199 * @bd_ring: Base pointer to the BD ring.
 200 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 201 * @n_bds: Total number of BDs in the ring.
 202 *
 203 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 204 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 205 * only.
 206 *
 207 * Each bulk/intr endpoint has a single IUDMA channel and a single
 208 * struct usb_ep.
 209 */
 210struct iudma_ch {
 211	unsigned int			ch_idx;
 212	int				ep_num;
 213	bool				enabled;
 214	int				max_pkt;
 215	bool				is_tx;
 216	struct bcm63xx_ep		*bep;
 217	struct bcm63xx_udc		*udc;
 218
 219	struct bcm_enet_desc		*read_bd;
 220	struct bcm_enet_desc		*write_bd;
 221	struct bcm_enet_desc		*end_bd;
 222	int				n_bds_used;
 223
 224	struct bcm_enet_desc		*bd_ring;
 225	dma_addr_t			bd_ring_dma;
 226	unsigned int			n_bds;
 227};
 228
 229/**
 230 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 231 * @ep_num: USB endpoint number.
 232 * @iudma: Pointer to IUDMA channel state.
 233 * @ep: USB gadget layer representation of the EP.
 234 * @udc: Reference to the device controller.
 235 * @queue: Linked list of outstanding requests for this EP.
 236 * @halted: 1 if the EP is stalled; 0 otherwise.
 237 */
 238struct bcm63xx_ep {
 239	unsigned int			ep_num;
 240	struct iudma_ch			*iudma;
 241	struct usb_ep			ep;
 242	struct bcm63xx_udc		*udc;
 243	struct list_head		queue;
 244	unsigned			halted:1;
 245};
 246
 247/**
 248 * struct bcm63xx_req - Internal (driver) state of a single request.
 249 * @queue: Links back to the EP's request list.
 250 * @req: USB gadget layer representation of the request.
 251 * @offset: Current byte offset into the data buffer (next byte to queue).
 252 * @bd_bytes: Number of data bytes in outstanding BD entries.
 253 * @iudma: IUDMA channel used for the request.
 254 */
 255struct bcm63xx_req {
 256	struct list_head		queue;		/* ep's requests */
 257	struct usb_request		req;
 258	unsigned int			offset;
 259	unsigned int			bd_bytes;
 260	struct iudma_ch			*iudma;
 261};
 262
 263/**
 264 * struct bcm63xx_udc - Driver/hardware private context.
 265 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 266 * @dev: Generic Linux device structure.
 267 * @pd: Platform data (board/port info).
 268 * @usbd_clk: Clock descriptor for the USB device block.
 269 * @usbh_clk: Clock descriptor for the USB host block.
 270 * @gadget: USB device.
 271 * @driver: Driver for USB device.
 272 * @usbd_regs: Base address of the USBD/USB20D block.
 273 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 274 * @bep: Array of endpoints, including ep0.
 275 * @iudma: Array of all IUDMA channels used by this controller.
 276 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 277 * @iface: USB interface number, from SET_INTERFACE wIndex.
 278 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 279 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 280 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 281 * @ep0state: Current state of the ep0 state machine.
 282 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 283 * @wedgemap: Bitmap of wedged endpoints.
 284 * @ep0_req_reset: USB reset is pending.
 285 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 286 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 287 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 288 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 289 * @ep0_reply: Pending reply from gadget driver.
 290 * @ep0_request: Outstanding ep0 request.
 291 */
 292struct bcm63xx_udc {
 293	spinlock_t			lock;
 294
 295	struct device			*dev;
 296	struct bcm63xx_usbd_platform_data *pd;
 297	struct clk			*usbd_clk;
 298	struct clk			*usbh_clk;
 299
 300	struct usb_gadget		gadget;
 301	struct usb_gadget_driver	*driver;
 302
 303	void __iomem			*usbd_regs;
 304	void __iomem			*iudma_regs;
 305
 306	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
 307	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];
 308
 309	int				cfg;
 310	int				iface;
 311	int				alt_iface;
 312
 313	struct bcm63xx_req		ep0_ctrl_req;
 314	u8				*ep0_ctrl_buf;
 315
 316	int				ep0state;
 317	struct work_struct		ep0_wq;
 318
 319	unsigned long			wedgemap;
 320
 321	unsigned			ep0_req_reset:1;
 322	unsigned			ep0_req_set_cfg:1;
 323	unsigned			ep0_req_set_iface:1;
 324	unsigned			ep0_req_shutdown:1;
 325
 326	unsigned			ep0_req_completed:1;
 327	struct usb_request		*ep0_reply;
 328	struct usb_request		*ep0_request;
 329};
 330
 331static const struct usb_ep_ops bcm63xx_udc_ep_ops;
 332
 333/***********************************************************************
 334 * Convenience functions
 335 ***********************************************************************/
 336
 337static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
 338{
 339	return container_of(g, struct bcm63xx_udc, gadget);
 340}
 341
 342static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
 343{
 344	return container_of(ep, struct bcm63xx_ep, ep);
 345}
 346
 347static inline struct bcm63xx_req *our_req(struct usb_request *req)
 348{
 349	return container_of(req, struct bcm63xx_req, req);
 350}
 351
 352static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
 353{
 354	return bcm_readl(udc->usbd_regs + off);
 355}
 356
 357static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
 358{
 359	bcm_writel(val, udc->usbd_regs + off);
 360}
 361
 362static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
 363{
 364	return bcm_readl(udc->iudma_regs + off);
 365}
 366
 367static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
 368{
 369	bcm_writel(val, udc->iudma_regs + off);
 370}
 371
 372static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
 373{
 374	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
 375			(ENETDMA_CHAN_WIDTH * chan));
 376}
 377
 378static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
 379					int chan)
 380{
 381	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
 382			(ENETDMA_CHAN_WIDTH * chan));
 383}
 384
 385static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
 386{
 387	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
 388			(ENETDMA_CHAN_WIDTH * chan));
 389}
 390
 391static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
 392					int chan)
 393{
 394	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
 395			(ENETDMA_CHAN_WIDTH * chan));
 396}
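
/*
 * Example of the addressing performed by the helpers above (a sketch;
 * the actual offsets come from the bcm63xx register headers):
 *
 *	u32 cfg = usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, 3);
 *
 * reads iudma_regs + IUDMA_DMAC_OFFSET (0x200) + ENETDMAC_CHANCFG_REG +
 * 3 * ENETDMA_CHAN_WIDTH, i.e. the channel-3 instance of the register.
 */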
 397
 398static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
 399{
 400	if (is_enabled) {
 401		clk_enable(udc->usbh_clk);
 402		clk_enable(udc->usbd_clk);
 403		udelay(10);
 404	} else {
 405		clk_disable(udc->usbd_clk);
 406		clk_disable(udc->usbh_clk);
 407	}
 408}
 409
 410/***********************************************************************
 411 * Low-level IUDMA / FIFO operations
 412 ***********************************************************************/
 413
 414/**
 415 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 416 * @udc: Reference to the device controller.
 417 * @idx: Desired init_sel value.
 418 *
 419 * The "init_sel" signal is used as a selection index for both endpoints
 420 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 421 * depends on the context.
 422 */
 423static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
 424{
 425	u32 val = usbd_readl(udc, USBD_CONTROL_REG);
 426
 427	val &= ~USBD_CONTROL_INIT_SEL_MASK;
 428	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
 429	usbd_writel(udc, val, USBD_CONTROL_REG);
 430}
 431
 432/**
 433 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 434 * @udc: Reference to the device controller.
 435 * @bep: Endpoint on which to operate.
 436 * @is_stalled: true to enable stall, false to disable.
 437 *
 438 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 439 * halt/stall conditions.
 440 */
 441static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
 442	bool is_stalled)
 443{
 444	u32 val;
 445
 446	val = USBD_STALL_UPDATE_MASK |
 447		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
 448		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
 449	usbd_writel(udc, val, USBD_STALL_REG);
 450}
 451
 452/**
 453 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 454 * @udc: Reference to the device controller.
 455 *
 456 * These parameters depend on the USB link speed.  Settings are
 457 * per-IUDMA-channel-pair.
 458 */
 459static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
 460{
 461	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
 462	u32 i, val, rx_fifo_slot, tx_fifo_slot;
 463
 464	/* set up FIFO boundaries and packet sizes; this is done in pairs */
 465	rx_fifo_slot = tx_fifo_slot = 0;
 466	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
 467		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
 468		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
 469
 470		bcm63xx_ep_dma_select(udc, i >> 1);
 471
 472		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
 473			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
 474			 USBD_RXFIFO_CONFIG_END_SHIFT);
 475		rx_fifo_slot += rx_cfg->n_fifo_slots;
 476		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
 477		usbd_writel(udc,
 478			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
 479			    USBD_RXFIFO_EPSIZE_REG);
 480
 481		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
 482			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
 483			 USBD_TXFIFO_CONFIG_END_SHIFT);
 484		tx_fifo_slot += tx_cfg->n_fifo_slots;
 485		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
 486		usbd_writel(udc,
 487			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
 488			    USBD_TXFIFO_EPSIZE_REG);
 489
 490		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
 491	}
 492}
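
/*
 * Worked example using the n_fifo_slots values from iudma_defaults[]:
 * the RX FIFO is carved into pair 0 -> slots 0..31, pair 1 -> slots
 * 32..159, and pair 2 -> slots 160..191; the TX FIFO is laid out the
 * same way from the tx_fifo_slot counter.
 */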
 493
 494/**
 495 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 496 * @udc: Reference to the device controller.
 497 * @ep_num: Endpoint number.
 498 */
 499static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
 500{
 501	u32 val;
 502
 503	bcm63xx_ep_dma_select(udc, ep_num);
 504
 505	val = usbd_readl(udc, USBD_CONTROL_REG);
 506	val |= USBD_CONTROL_FIFO_RESET_MASK;
 507	usbd_writel(udc, val, USBD_CONTROL_REG);
 508	usbd_readl(udc, USBD_CONTROL_REG);
 509}
 510
 511/**
 512 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 513 * @udc: Reference to the device controller.
 514 */
 515static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
 516{
 517	int i;
 518
 519	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
 520		bcm63xx_fifo_reset_ep(udc, i);
 521}
 522
 523/**
 524 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 525 * @udc: Reference to the device controller.
 526 */
 527static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
 528{
 529	u32 i, val;
 530
 531	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
 532		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
 533
 534		if (cfg->ep_num < 0)
 535			continue;
 536
 537		bcm63xx_ep_dma_select(udc, cfg->ep_num);
 538		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
 539			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
 540		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
 541	}
 542}
 543
 544/**
 545 * bcm63xx_ep_setup - Configure per-endpoint settings.
 546 * @udc: Reference to the device controller.
 547 *
 548 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 549 */
 550static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
 551{
 552	u32 val, i;
 553
 554	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
 555
 556	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
 557		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
 558		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
 559			      cfg->max_pkt_hs : cfg->max_pkt_fs;
 560		int idx = cfg->ep_num;
 561
 562		udc->iudma[i].max_pkt = max_pkt;
 563
 564		if (idx < 0)
 565			continue;
 566		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
 567
 568		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
 569		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
 570		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
 571		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
 572		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
 573		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
 574		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
 575		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
 576	}
 577}
 578
 579/**
 580 * iudma_write - Queue a single IUDMA transaction.
 581 * @udc: Reference to the device controller.
 582 * @iudma: IUDMA channel to use.
 583 * @breq: Request containing the transaction data.
 584 *
 585 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 586 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 587 * So iudma_write() may be called several times to fulfill a single
 588 * usb_request.
 589 *
 590 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 591 */
 592static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
 593	struct bcm63xx_req *breq)
 594{
 595	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
 596	unsigned int bytes_left = breq->req.length - breq->offset;
 597	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
 598		iudma->max_pkt : IUDMA_MAX_FRAGMENT;
 599
 600	iudma->n_bds_used = 0;
 601	breq->bd_bytes = 0;
 602	breq->iudma = iudma;
 603
 604	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
 605		extra_zero_pkt = 1;
 606
 607	do {
 608		struct bcm_enet_desc *d = iudma->write_bd;
 609		u32 dmaflags = 0;
 610		unsigned int n_bytes;
 611
 612		if (d == iudma->end_bd) {
 613			dmaflags |= DMADESC_WRAP_MASK;
 614			iudma->write_bd = iudma->bd_ring;
 615		} else {
 616			iudma->write_bd++;
 617		}
 618		iudma->n_bds_used++;
 619
 620		n_bytes = min_t(int, bytes_left, max_bd_bytes);
 621		if (n_bytes)
 622			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
 623		else
 624			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
 625				    DMADESC_USB_ZERO_MASK;
 626
 627		dmaflags |= DMADESC_OWNER_MASK;
 628		if (first_bd) {
 629			dmaflags |= DMADESC_SOP_MASK;
 630			first_bd = 0;
 631		}
 632
 633		/*
 634		 * extra_zero_pkt forces one more iteration through the loop
 635		 * after all data is queued up, to send the zero packet
 636		 */
 637		if (extra_zero_pkt && !bytes_left)
 638			extra_zero_pkt = 0;
 639
 640		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
 641		    (n_bytes == bytes_left && !extra_zero_pkt)) {
 642			last_bd = 1;
 643			dmaflags |= DMADESC_EOP_MASK;
 644		}
 645
 646		d->address = breq->req.dma + breq->offset;
 647		mb();
 648		d->len_stat = dmaflags;
 649
 650		breq->offset += n_bytes;
 651		breq->bd_bytes += n_bytes;
 652		bytes_left -= n_bytes;
 653	} while (!last_bd);
 654
 655	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
 656			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
 657}
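
/*
 * Example of how the loop above fragments a request (illustrative,
 * assuming req->zero is clear): a 5000-byte TX request is queued as
 * three BDs of 2048, 2048, and 904 bytes, with SOP on the first and
 * EOP on the last, before the channel is kicked.  The same request on
 * a non-coalesced RX channel would instead be queued one max_pkt-sized
 * BD per iudma_write() call.
 */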
 658
 659/**
 660 * iudma_read - Check for IUDMA buffer completion.
 661 * @udc: Reference to the device controller.
 662 * @iudma: IUDMA channel to use.
 663 *
 664 * This checks to see if ALL of the outstanding BDs on the DMA channel
 665 * have been filled.  If so, it returns the actual transfer length;
 666 * otherwise it returns -EBUSY.
 667 */
 668static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
 669{
 670	int i, actual_len = 0;
 671	struct bcm_enet_desc *d = iudma->read_bd;
 672
 673	if (!iudma->n_bds_used)
 674		return -EINVAL;
 675
 676	for (i = 0; i < iudma->n_bds_used; i++) {
 677		u32 dmaflags;
 678
 679		dmaflags = d->len_stat;
 680
 681		if (dmaflags & DMADESC_OWNER_MASK)
 682			return -EBUSY;
 683
 684		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
 685			      DMADESC_LENGTH_SHIFT;
 686		if (d == iudma->end_bd)
 687			d = iudma->bd_ring;
 688		else
 689			d++;
 690	}
 691
 692	iudma->read_bd = d;
 693	iudma->n_bds_used = 0;
 694	return actual_len;
 695}
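
/*
 * For example (illustrative): if a 3-BD transaction has completed only
 * its first two BDs, the third still has DMADESC_OWNER_MASK set and
 * iudma_read() returns -EBUSY; once all three are done, it returns the
 * summed DMADESC_LENGTH fields and resets n_bds_used.
 */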
 696
 697/**
 698 * iudma_reset_channel - Stop DMA on a single channel.
 699 * @udc: Reference to the device controller.
 700 * @iudma: IUDMA channel to reset.
 701 */
 702static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
 703{
 704	int timeout = IUDMA_RESET_TIMEOUT_US;
 705	struct bcm_enet_desc *d;
 706	int ch_idx = iudma->ch_idx;
 707
 708	if (!iudma->is_tx)
 709		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
 710
 711	/* stop DMA, then wait for the hardware to wrap up */
 712	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
 713
 714	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
 715				   ENETDMAC_CHANCFG_EN_MASK) {
 716		udelay(1);
 717
 718		/* repeatedly flush the FIFO data until the BD completes */
 719		if (iudma->is_tx && iudma->ep_num >= 0)
 720			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
 721
 722		if (!timeout--) {
 723			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
 724				ch_idx);
 725			break;
 726		}
 727		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
 728			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
 729				 ch_idx);
 730			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
 731					ENETDMAC_CHANCFG_REG, ch_idx);
 732		}
 733	}
 734	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
 735
 736	/* don't leave "live" HW-owned entries for the next guy to step on */
 737	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
 738		d->len_stat = 0;
 739	mb();
 740
 741	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
 742	iudma->n_bds_used = 0;
 743
 744	/* set up IRQs, UBUS burst size, and BD base for this channel */
 745	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
 746			ENETDMAC_IRMASK_REG, ch_idx);
 747	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
 748
 749	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
 750	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
 751}
 752
 753/**
 754 * iudma_init_channel - One-time IUDMA channel initialization.
 755 * @udc: Reference to the device controller.
 756 * @ch_idx: Channel to initialize.
 757 */
 758static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
 759{
 760	struct iudma_ch *iudma = &udc->iudma[ch_idx];
 761	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
 762	unsigned int n_bds = cfg->n_bds;
 763	struct bcm63xx_ep *bep = NULL;
 764
 765	iudma->ep_num = cfg->ep_num;
 766	iudma->ch_idx = ch_idx;
 767	iudma->is_tx = !!(ch_idx & 0x01);
 768	if (iudma->ep_num >= 0) {
 769		bep = &udc->bep[iudma->ep_num];
 770		bep->iudma = iudma;
 771		INIT_LIST_HEAD(&bep->queue);
 772	}
 773
 774	iudma->bep = bep;
 775	iudma->udc = udc;
 776
 777	/* ep0 is always active; others are controlled by the gadget driver */
 778	if (iudma->ep_num <= 0)
 779		iudma->enabled = true;
 780
 781	iudma->n_bds = n_bds;
 782	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
 783		n_bds * sizeof(struct bcm_enet_desc),
 784		&iudma->bd_ring_dma, GFP_KERNEL);
 785	if (!iudma->bd_ring)
 786		return -ENOMEM;
 787	iudma->end_bd = &iudma->bd_ring[n_bds - 1];
 788
 789	return 0;
 790}
 791
 792/**
 793 * iudma_init - One-time initialization of all IUDMA channels.
 794 * @udc: Reference to the device controller.
 795 *
 796 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 797 */
 798static int iudma_init(struct bcm63xx_udc *udc)
 799{
 800	int i, rc;
 801
 802	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
 803
 804	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
 805		rc = iudma_init_channel(udc, i);
 806		if (rc)
 807			return rc;
 808		iudma_reset_channel(udc, &udc->iudma[i]);
 809	}
 810
 811	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
 812	return 0;
 813}
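
/*
 * Note on the IRQ mask above: with BCM63XX_NUM_IUDMA = 6,
 * BIT(BCM63XX_NUM_IUDMA) - 1 = 0x3f, enabling the global interrupt for
 * all six channels at once.
 */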
 814
 815/**
 816 * iudma_uninit - Uninitialize IUDMA channels.
 817 * @udc: Reference to the device controller.
 818 *
 819 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 820 */
 821static void iudma_uninit(struct bcm63xx_udc *udc)
 822{
 823	int i;
 824
 825	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
 826
 827	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
 828		iudma_reset_channel(udc, &udc->iudma[i]);
 829
 830	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
 831}
 832
 833/***********************************************************************
 834 * Other low-level USBD operations
 835 ***********************************************************************/
 836
 837/**
 838 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 839 * @udc: Reference to the device controller.
 840 * @enable_irqs: true to enable, false to disable.
 841 */
 842static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
 843{
 844	u32 val;
 845
 846	usbd_writel(udc, 0, USBD_STATUS_REG);
 847
 848	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
 849	      BIT(USBD_EVENT_IRQ_SETUP) |
 850	      BIT(USBD_EVENT_IRQ_SETCFG) |
 851	      BIT(USBD_EVENT_IRQ_SETINTF) |
 852	      BIT(USBD_EVENT_IRQ_USB_LINK);
 853	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
 854	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
 855}
 856
 857/**
 858 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 859 * @udc: Reference to the device controller.
 860 * @is_device: true for device, false for host.
 861 *
 862 * This should probably be reworked to use the drivers/usb/otg
 863 * infrastructure.
 864 *
 865 * By default, the AFE/pullups are disabled in device mode, until
 866 * bcm63xx_select_pullup() is called.
 867 */
 868static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
 869{
 870	u32 val, portmask = BIT(udc->pd->port_no);
 871
 872	if (BCMCPU_IS_6328()) {
 873		/* configure pinmux to sense VBUS signal */
 874		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
 875		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
 876		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
 877			       GPIO_PINMUX_OTHR_6328_USB_HOST;
 878		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
 879	}
 880
 881	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
 882	if (is_device) {
 883		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
 884		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 885	} else {
 886		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
 887		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 888	}
 889	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
 890
 891	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
 892	if (is_device)
 893		val |= USBH_PRIV_SWAP_USBD_MASK;
 894	else
 895		val &= ~USBH_PRIV_SWAP_USBD_MASK;
 896	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
 897}
 898
 899/**
 900 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 901 * @udc: Reference to the device controller.
 902 * @is_on: true to enable the pullup, false to disable.
 903 *
 904 * If the pullup is active, the host will sense a FS/HS device connected to
 905 * the port.  If the pullup is inactive, the host will think the USB
 906 * device has been disconnected.
 907 */
 908static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
 909{
 910	u32 val, portmask = BIT(udc->pd->port_no);
 911
 912	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
 913	if (is_on)
 914		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 915	else
 916		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 917	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
 918}
 919
 920/**
 921 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 922 * @udc: Reference to the device controller.
 923 *
 924 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 925 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 926 */
 927static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
 928{
 929	set_clocks(udc, true);
 930	iudma_uninit(udc);
 931	set_clocks(udc, false);
 932
 933	clk_put(udc->usbd_clk);
 934	clk_put(udc->usbh_clk);
 935}
 936
 937/**
 938 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 939 * @udc: Reference to the device controller.
 940 */
 941static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
 942{
 943	int i, rc = 0;
 944	u32 val;
 945
 946	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
 947					 GFP_KERNEL);
 948	if (!udc->ep0_ctrl_buf)
 949		return -ENOMEM;
 950
 951	INIT_LIST_HEAD(&udc->gadget.ep_list);
 952	for (i = 0; i < BCM63XX_NUM_EP; i++) {
 953		struct bcm63xx_ep *bep = &udc->bep[i];
 954
 955		bep->ep.name = bcm63xx_ep_info[i].name;
 956		bep->ep.caps = bcm63xx_ep_info[i].caps;
 957		bep->ep_num = i;
 958		bep->ep.ops = &bcm63xx_udc_ep_ops;
 959		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
 960		bep->halted = 0;
 961		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
 962		bep->udc = udc;
 963		bep->ep.desc = NULL;
 964		INIT_LIST_HEAD(&bep->queue);
 965	}
 966
 967	udc->gadget.ep0 = &udc->bep[0].ep;
 968	list_del(&udc->bep[0].ep.ep_list);
 969
 970	udc->gadget.speed = USB_SPEED_UNKNOWN;
 971	udc->ep0state = EP0_SHUTDOWN;
 972
 973	udc->usbh_clk = clk_get(udc->dev, "usbh");
 974	if (IS_ERR(udc->usbh_clk))
 975		return -EIO;
 976
 977	udc->usbd_clk = clk_get(udc->dev, "usbd");
 978	if (IS_ERR(udc->usbd_clk)) {
 979		clk_put(udc->usbh_clk);
 980		return -EIO;
 981	}
 982
 983	set_clocks(udc, true);
 984
 985	val = USBD_CONTROL_AUTO_CSRS_MASK |
 986	      USBD_CONTROL_DONE_CSRS_MASK |
 987	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
 988	usbd_writel(udc, val, USBD_CONTROL_REG);
 989
 990	val = USBD_STRAPS_APP_SELF_PWR_MASK |
 991	      USBD_STRAPS_APP_RAM_IF_MASK |
 992	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
 993	      USBD_STRAPS_APP_8BITPHY_MASK |
 994	      USBD_STRAPS_APP_RMTWKUP_MASK;
 995
 996	if (udc->gadget.max_speed == USB_SPEED_HIGH)
 997		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
 998	else
 999		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1000	usbd_writel(udc, val, USBD_STRAPS_REG);
1001
1002	bcm63xx_set_ctrl_irqs(udc, false);
1003
1004	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1005
1006	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1007	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1008	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1009
1010	rc = iudma_init(udc);
1011	set_clocks(udc, false);
1012	if (rc)
1013		bcm63xx_uninit_udc_hw(udc);
1014
1015	return rc;
1016}
1017
1018/***********************************************************************
1019 * Standard EP gadget operations
1020 ***********************************************************************/
1021
1022/**
1023 * bcm63xx_ep_enable - Enable one endpoint.
1024 * @ep: Endpoint to enable.
1025 * @desc: Contains max packet, direction, etc.
1026 *
1027 * Most of the endpoint parameters are fixed in this controller, so there
1028 * isn't much for this function to do.
1029 */
1030static int bcm63xx_ep_enable(struct usb_ep *ep,
1031	const struct usb_endpoint_descriptor *desc)
1032{
1033	struct bcm63xx_ep *bep = our_ep(ep);
1034	struct bcm63xx_udc *udc = bep->udc;
1035	struct iudma_ch *iudma = bep->iudma;
1036	unsigned long flags;
1037
1038	if (!ep || !desc || ep->name == bcm63xx_ep0name)
1039		return -EINVAL;
1040
1041	if (!udc->driver)
1042		return -ESHUTDOWN;
1043
1044	spin_lock_irqsave(&udc->lock, flags);
1045	if (iudma->enabled) {
1046		spin_unlock_irqrestore(&udc->lock, flags);
1047		return -EINVAL;
1048	}
1049
1050	iudma->enabled = true;
1051	BUG_ON(!list_empty(&bep->queue));
1052
1053	iudma_reset_channel(udc, iudma);
1054
1055	bep->halted = 0;
1056	bcm63xx_set_stall(udc, bep, false);
1057	clear_bit(bep->ep_num, &udc->wedgemap);
1058
1059	ep->desc = desc;
1060	ep->maxpacket = usb_endpoint_maxp(desc);
1061
1062	spin_unlock_irqrestore(&udc->lock, flags);
1063	return 0;
1064}
1065
1066/**
1067 * bcm63xx_ep_disable - Disable one endpoint.
1068 * @ep: Endpoint to disable.
1069 */
1070static int bcm63xx_ep_disable(struct usb_ep *ep)
1071{
1072	struct bcm63xx_ep *bep = our_ep(ep);
1073	struct bcm63xx_udc *udc = bep->udc;
1074	struct iudma_ch *iudma = bep->iudma;
1075	struct bcm63xx_req *breq, *n;
1076	unsigned long flags;
1077
1078	if (!ep || !ep->desc)
1079		return -EINVAL;
1080
1081	spin_lock_irqsave(&udc->lock, flags);
1082	if (!iudma->enabled) {
1083		spin_unlock_irqrestore(&udc->lock, flags);
1084		return -EINVAL;
1085	}
1086	iudma->enabled = false;
1087
1088	iudma_reset_channel(udc, iudma);
1089
1090	if (!list_empty(&bep->queue)) {
1091		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1092			usb_gadget_unmap_request(&udc->gadget, &breq->req,
1093						 iudma->is_tx);
1094			list_del(&breq->queue);
1095			breq->req.status = -ESHUTDOWN;
1096
1097			spin_unlock_irqrestore(&udc->lock, flags);
1098			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1099			spin_lock_irqsave(&udc->lock, flags);
1100		}
1101	}
1102	ep->desc = NULL;
1103
1104	spin_unlock_irqrestore(&udc->lock, flags);
1105	return 0;
1106}
1107
1108/**
1109 * bcm63xx_udc_alloc_request - Allocate a new request.
1110 * @ep: Endpoint associated with the request.
1111 * @mem_flags: Flags to pass to kzalloc().
1112 */
1113static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1114	gfp_t mem_flags)
1115{
1116	struct bcm63xx_req *breq;
1117
1118	breq = kzalloc(sizeof(*breq), mem_flags);
1119	if (!breq)
1120		return NULL;
1121	return &breq->req;
1122}
1123
1124/**
1125 * bcm63xx_udc_free_request - Free a request.
1126 * @ep: Endpoint associated with the request.
1127 * @req: Request to free.
1128 */
1129static void bcm63xx_udc_free_request(struct usb_ep *ep,
1130	struct usb_request *req)
1131{
1132	struct bcm63xx_req *breq = our_req(req);
1133	kfree(breq);
1134}
1135
1136/**
1137 * bcm63xx_udc_queue - Queue up a new request.
1138 * @ep: Endpoint associated with the request.
1139 * @req: Request to add.
1140 * @mem_flags: Unused.
1141 *
1142 * If the queue is empty, start this request immediately.  Otherwise, add
1143 * it to the list.
1144 *
1145 * ep0 replies are sent through this function from the gadget driver, but
1146 * they are treated differently because they need to be handled by the ep0
1147 * state machine.  (Sometimes they are replies to control requests that
1148 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1149 */
1150static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1151	gfp_t mem_flags)
1152{
1153	struct bcm63xx_ep *bep = our_ep(ep);
1154	struct bcm63xx_udc *udc = bep->udc;
1155	struct bcm63xx_req *breq = our_req(req);
1156	unsigned long flags;
1157	int rc = 0;
1158
1159	if (unlikely(!req || !req->complete || !req->buf || !ep))
1160		return -EINVAL;
1161
1162	req->actual = 0;
1163	req->status = 0;
1164	breq->offset = 0;
1165
1166	if (bep == &udc->bep[0]) {
1167		/* only one reply per request, please */
1168		if (udc->ep0_reply)
1169			return -EINVAL;
1170
1171		udc->ep0_reply = req;
1172		schedule_work(&udc->ep0_wq);
1173		return 0;
1174	}
1175
1176	spin_lock_irqsave(&udc->lock, flags);
1177	if (!bep->iudma->enabled) {
1178		rc = -ESHUTDOWN;
1179		goto out;
1180	}
1181
1182	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1183	if (rc == 0) {
1184		list_add_tail(&breq->queue, &bep->queue);
1185		if (list_is_singular(&bep->queue))
1186			iudma_write(udc, bep->iudma, breq);
1187	}
1188
1189out:
1190	spin_unlock_irqrestore(&udc->lock, flags);
1191	return rc;
1192}
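
/*
 * Illustrative gadget-driver usage of the queue op above (standard
 * gadget API; my_complete is a hypothetical callback):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	rc = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * On a non-ep0 endpoint this lands in bcm63xx_udc_queue(), which starts
 * the transfer immediately if the endpoint's queue was empty.
 */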
1193
1194/**
1195 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1196 * @ep: Endpoint associated with the request.
1197 * @req: Request to remove.
1198 *
1199 * If the request is not at the head of the queue, this is easy - just nuke
1200 * it.  If the request is at the head of the queue, we'll need to stop the
1201 * DMA transaction and then queue up the successor.
1202 */
1203static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1204{
1205	struct bcm63xx_ep *bep = our_ep(ep);
1206	struct bcm63xx_udc *udc = bep->udc;
1207	struct bcm63xx_req *breq = our_req(req), *cur;
1208	unsigned long flags;
1209	int rc = 0;
1210
1211	spin_lock_irqsave(&udc->lock, flags);
1212	if (list_empty(&bep->queue)) {
1213		rc = -EINVAL;
1214		goto out;
1215	}
1216
1217	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1218	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1219
1220	if (breq == cur) {
1221		iudma_reset_channel(udc, bep->iudma);
1222		list_del(&breq->queue);
1223
1224		if (!list_empty(&bep->queue)) {
1225			struct bcm63xx_req *next;
1226
1227			next = list_first_entry(&bep->queue,
1228				struct bcm63xx_req, queue);
1229			iudma_write(udc, bep->iudma, next);
1230		}
1231	} else {
1232		list_del(&breq->queue);
1233	}
1234
1235out:
1236	spin_unlock_irqrestore(&udc->lock, flags);
1237
1238	req->status = -ESHUTDOWN;
1239	req->complete(ep, req);
1240
1241	return rc;
1242}
1243
1244/**
1245 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1246 * @ep: Endpoint to halt.
1247 * @value: Zero to clear halt; nonzero to set halt.
1248 *
1249 * See comments in bcm63xx_update_wedge().
1250 */
1251static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1252{
1253	struct bcm63xx_ep *bep = our_ep(ep);
1254	struct bcm63xx_udc *udc = bep->udc;
1255	unsigned long flags;
1256
1257	spin_lock_irqsave(&udc->lock, flags);
1258	bcm63xx_set_stall(udc, bep, !!value);
1259	bep->halted = value;
1260	spin_unlock_irqrestore(&udc->lock, flags);
1261
1262	return 0;
1263}
1264
1265/**
1266 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1267 * @ep: Endpoint to wedge.
1268 *
1269 * See comments in bcm63xx_update_wedge().
1270 */
1271static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1272{
1273	struct bcm63xx_ep *bep = our_ep(ep);
1274	struct bcm63xx_udc *udc = bep->udc;
1275	unsigned long flags;
1276
1277	spin_lock_irqsave(&udc->lock, flags);
1278	set_bit(bep->ep_num, &udc->wedgemap);
1279	bcm63xx_set_stall(udc, bep, true);
1280	spin_unlock_irqrestore(&udc->lock, flags);
1281
1282	return 0;
1283}
1284
1285static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1286	.enable		= bcm63xx_ep_enable,
1287	.disable	= bcm63xx_ep_disable,
1288
1289	.alloc_request	= bcm63xx_udc_alloc_request,
1290	.free_request	= bcm63xx_udc_free_request,
1291
1292	.queue		= bcm63xx_udc_queue,
1293	.dequeue	= bcm63xx_udc_dequeue,
1294
1295	.set_halt	= bcm63xx_udc_set_halt,
1296	.set_wedge	= bcm63xx_udc_set_wedge,
1297};
1298
1299/***********************************************************************
1300 * EP0 handling
1301 ***********************************************************************/
1302
1303/**
1304 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1305 * @udc: Reference to the device controller.
1306 * @ctrl: 8-byte SETUP request.
1307 */
1308static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1309	struct usb_ctrlrequest *ctrl)
1310{
1311	int rc;
1312
1313	spin_unlock_irq(&udc->lock);
1314	rc = udc->driver->setup(&udc->gadget, ctrl);
1315	spin_lock_irq(&udc->lock);
1316	return rc;
1317}
1318
1319/**
1320 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1321 * @udc: Reference to the device controller.
1322 *
1323 * Many standard requests are handled automatically in the hardware, but
1324 * we still need to pass them to the gadget driver so that it can
1325 * reconfigure the interfaces/endpoints if necessary.
1326 *
1327 * Unfortunately we are not able to send a STALL response if the host
1328 * requests an invalid configuration.  If this happens, we'll have to be
1329 * content with printing a warning.
1330 */
1331static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1332{
1333	struct usb_ctrlrequest ctrl;
1334	int rc;
1335
1336	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1337	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1338	ctrl.wValue = cpu_to_le16(udc->cfg);
1339	ctrl.wIndex = 0;
1340	ctrl.wLength = 0;
1341
1342	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1343	if (rc < 0) {
1344		dev_warn_ratelimited(udc->dev,
1345			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1346			udc->cfg);
1347	}
1348	return rc;
1349}
1350
1351/**
1352 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1353 * @udc: Reference to the device controller.
1354 */
1355static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1356{
1357	struct usb_ctrlrequest ctrl;
1358	int rc;
1359
1360	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1361	ctrl.bRequest = USB_REQ_SET_INTERFACE;
1362	ctrl.wValue = cpu_to_le16(udc->alt_iface);
1363	ctrl.wIndex = cpu_to_le16(udc->iface);
1364	ctrl.wLength = 0;
1365
1366	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1367	if (rc < 0) {
1368		dev_warn_ratelimited(udc->dev,
1369			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1370			udc->iface, udc->alt_iface);
1371	}
1372	return rc;
1373}
1374
1375/**
1376 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1377 * @udc: Reference to the device controller.
1378 * @ch_idx: IUDMA channel number.
1379 * @req: USB gadget layer representation of the request.
1380 */
1381static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1382	struct usb_request *req)
1383{
1384	struct bcm63xx_req *breq = our_req(req);
1385	struct iudma_ch *iudma = &udc->iudma[ch_idx];
1386
1387	BUG_ON(udc->ep0_request);
1388	udc->ep0_request = req;
1389
1390	req->actual = 0;
1391	breq->offset = 0;
1392	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1393	iudma_write(udc, iudma, breq);
1394}
1395
1396/**
1397 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1398 * @udc: Reference to the device controller.
1399 * @req: USB gadget layer representation of the request.
1400 * @status: Status to return to the gadget driver.
1401 */
1402static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1403	struct usb_request *req, int status)
1404{
1405	req->status = status;
1406	if (status)
1407		req->actual = 0;
1408	if (req->complete) {
1409		spin_unlock_irq(&udc->lock);
1410		req->complete(&udc->bep[0].ep, req);
1411		spin_lock_irq(&udc->lock);
1412	}
1413}
1414
1415/**
1416 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1417 *   reset/shutdown.
1418 * @udc: Reference to the device controller.
1419 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1420 */
1421static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1422{
1423	struct usb_request *req = udc->ep0_reply;
1424
1425	udc->ep0_reply = NULL;
1426	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1427	if (udc->ep0_request == req) {
1428		udc->ep0_req_completed = 0;
1429		udc->ep0_request = NULL;
1430	}
1431	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1432}
1433
1434/**
1435 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1436 *   transfer len.
1437 * @udc: Reference to the device controller.
1438 */
1439static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1440{
1441	struct usb_request *req = udc->ep0_request;
1442
1443	udc->ep0_req_completed = 0;
1444	udc->ep0_request = NULL;
1445
1446	return req->actual;
1447}
1448
1449/**
1450 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1451 * @udc: Reference to the device controller.
1452 * @ch_idx: IUDMA channel number.
1453 * @length: Number of bytes to TX/RX.
1454 *
1455 * Used for simple transfers performed by the ep0 worker.  This will always
1456 * use ep0_ctrl_req / ep0_ctrl_buf.
1457 */
1458static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1459	int length)
1460{
1461	struct usb_request *req = &udc->ep0_ctrl_req.req;
1462
1463	req->buf = udc->ep0_ctrl_buf;
1464	req->length = length;
1465	req->complete = NULL;
1466
1467	bcm63xx_ep0_map_write(udc, ch_idx, req);
1468}
1469
1470/**
1471 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1472 * @udc: Reference to the device controller.
1473 *
1474 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
1475 * for the next packet.  Anything else means the transaction requires multiple
1476 * stages of handling.
1477 */
1478static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1479{
1480	int rc;
1481	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1482
1483	rc = bcm63xx_ep0_read_complete(udc);
1484
1485	if (rc < 0) {
1486		dev_err(udc->dev, "missing SETUP packet\n");
1487		return EP0_IDLE;
1488	}
1489
1490	/*
1491	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
1492	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
1493	 * just throw it away.
1494	 */
1495	if (rc == 0)
1496		return EP0_REQUEUE;
1497
1498	/* Drop malformed SETUP packets */
1499	if (rc != sizeof(*ctrl)) {
1500		dev_warn_ratelimited(udc->dev,
1501			"malformed SETUP packet (%d bytes)\n", rc);
1502		return EP0_REQUEUE;
1503	}
1504
1505	/* Process new SETUP packet arriving on ep0 */
1506	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1507	if (rc < 0) {
1508		bcm63xx_set_stall(udc, &udc->bep[0], true);
1509		return EP0_REQUEUE;
1510	}
1511
1512	if (!ctrl->wLength)
1513		return EP0_REQUEUE;
1514	else if (ctrl->bRequestType & USB_DIR_IN)
1515		return EP0_IN_DATA_PHASE_SETUP;
1516	else
1517		return EP0_OUT_DATA_PHASE_SETUP;
1518}
1519
1520/**
1521 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1522 * @udc: Reference to the device controller.
1523 *
1524 * In state EP0_IDLE, the RX descriptor is either pending, or has been
1525 * filled with a SETUP packet from the host.  This function handles new
1526 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1527 * and reset/shutdown events.
1528 *
1529 * Returns 0 if work was done; -EAGAIN if nothing to do.
1530 */
1531static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1532{
1533	if (udc->ep0_req_reset) {
1534		udc->ep0_req_reset = 0;
1535	} else if (udc->ep0_req_set_cfg) {
1536		udc->ep0_req_set_cfg = 0;
1537		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1538			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1539	} else if (udc->ep0_req_set_iface) {
1540		udc->ep0_req_set_iface = 0;
1541		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1542			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1543	} else if (udc->ep0_req_completed) {
1544		udc->ep0state = bcm63xx_ep0_do_setup(udc);
1545		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1546	} else if (udc->ep0_req_shutdown) {
1547		udc->ep0_req_shutdown = 0;
1548		udc->ep0_req_completed = 0;
1549		udc->ep0_request = NULL;
1550		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1551		usb_gadget_unmap_request(&udc->gadget,
1552			&udc->ep0_ctrl_req.req, 0);
1553
1554		/* bcm63xx_udc_pullup() is waiting for this */
1555		mb();
1556		udc->ep0state = EP0_SHUTDOWN;
1557	} else if (udc->ep0_reply) {
1558		/*
1559		 * This could happen if a USB RESET shows up during an ep0
1560		 * transaction (especially if a laggy driver like gadgetfs
1561		 * is in use).
1562		 */
1563		dev_warn(udc->dev, "nuking unexpected reply\n");
1564		bcm63xx_ep0_nuke_reply(udc, 0);
1565	} else {
1566		return -EAGAIN;
1567	}
1568
1569	return 0;
1570}
1571
1572/**
1573 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1574 * @udc: Reference to the device controller.
1575 *
1576 * Returns 0 if work was done; -EAGAIN if nothing to do.
1577 */
1578static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1579{
1580	enum bcm63xx_ep0_state ep0state = udc->ep0state;
1581	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1582
1583	switch (udc->ep0state) {
1584	case EP0_REQUEUE:
1585		/* set up descriptor to receive SETUP packet */
1586		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1587					     BCM63XX_MAX_CTRL_PKT);
1588		ep0state = EP0_IDLE;
1589		break;
1590	case EP0_IDLE:
1591		return bcm63xx_ep0_do_idle(udc);
1592	case EP0_IN_DATA_PHASE_SETUP:
1593		/*
1594		 * Normal case: TX request is in ep0_reply (queued by the
1595		 * callback), or will be queued shortly.  When it's here,
1596		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1597		 *
1598		 * Shutdown case: Stop waiting for the reply.  Just
1599		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
1600		 * queue anything else now.
1601		 */
1602		if (udc->ep0_reply) {
1603			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1604					      udc->ep0_reply);
1605			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1606		} else if (shutdown) {
1607			ep0state = EP0_REQUEUE;
1608		}
1609		break;
1610	case EP0_IN_DATA_PHASE_COMPLETE: {
1611		/*
1612		 * Normal case: TX packet (ep0_reply) is in flight; wait for
1613		 * it to finish, then go back to REQUEUE->IDLE.
1614		 *
1615		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1616		 * completion to the gadget driver, then REQUEUE->IDLE.
1617		 */
1618		if (udc->ep0_req_completed) {
1619			udc->ep0_reply = NULL;
1620			bcm63xx_ep0_read_complete(udc);
1621			/*
1622			 * the "ack" sometimes gets eaten (see
1623			 * bcm63xx_ep0_do_idle)
1624			 */
1625			ep0state = EP0_REQUEUE;
1626		} else if (shutdown) {
1627			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1628			bcm63xx_ep0_nuke_reply(udc, 1);
1629			ep0state = EP0_REQUEUE;
1630		}
1631		break;
1632	}
1633	case EP0_OUT_DATA_PHASE_SETUP:
1634		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1635		if (udc->ep0_reply) {
1636			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1637					      udc->ep0_reply);
1638			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1639		} else if (shutdown) {
1640			ep0state = EP0_REQUEUE;
1641		}
1642		break;
1643	case EP0_OUT_DATA_PHASE_COMPLETE: {
1644		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1645		if (udc->ep0_req_completed) {
1646			udc->ep0_reply = NULL;
1647			bcm63xx_ep0_read_complete(udc);
1648
1649			/* send 0-byte ack to host */
1650			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1651			ep0state = EP0_OUT_STATUS_PHASE;
1652		} else if (shutdown) {
1653			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1654			bcm63xx_ep0_nuke_reply(udc, 0);
1655			ep0state = EP0_REQUEUE;
1656		}
1657		break;
1658	}
1659	case EP0_OUT_STATUS_PHASE:
1660		/*
1661		 * Normal case: 0-byte OUT ack packet is in flight; wait
1662		 * for it to finish, then go back to REQUEUE->IDLE.
1663		 *
1664		 * Shutdown case: just cancel the transmission.  Don't bother
1665		 * calling the completion, because it originated from this
1666		 * function anyway.  Then go back to REQUEUE->IDLE.
1667		 */
1668		if (udc->ep0_req_completed) {
1669			bcm63xx_ep0_read_complete(udc);
1670			ep0state = EP0_REQUEUE;
1671		} else if (shutdown) {
1672			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1673			udc->ep0_request = NULL;
1674			ep0state = EP0_REQUEUE;
1675		}
1676		break;
1677	case EP0_IN_FAKE_STATUS_PHASE: {
1678		/*
1679		 * Normal case: we spoofed a SETUP packet and are now
1680		 * waiting for the gadget driver to send a 0-byte reply.
1681		 * This doesn't actually get sent to the HW because the
1682		 * HW has already sent its own reply.  Once we get the
1683		 * response, return to IDLE.
1684		 *
1685		 * Shutdown case: return to IDLE immediately.
1686		 *
1687		 * Note that the ep0 RX descriptor has remained queued
1688		 * (and possibly unfilled) during this entire transaction.
1689		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1690		 * or SET_INTERFACE transactions.
1691		 */
1692		struct usb_request *r = udc->ep0_reply;
1693
1694		if (!r) {
1695			if (shutdown)
1696				ep0state = EP0_IDLE;
1697			break;
1698		}
1699
1700		bcm63xx_ep0_complete(udc, r, 0);
1701		udc->ep0_reply = NULL;
1702		ep0state = EP0_IDLE;
1703		break;
1704	}
1705	case EP0_SHUTDOWN:
1706		break;
1707	}
1708
1709	if (udc->ep0state == ep0state)
1710		return -EAGAIN;
1711
1712	udc->ep0state = ep0state;
1713	return 0;
1714}
1715
1716/**
1717 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1718 * @w: Workqueue struct.
1719 *
1720 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
1721 * is used to synchronize ep0 events and ensure that both HW and SW events
1722 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
1723 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1724 * by the USBD hardware.
1725 *
1726 * The worker function will continue iterating around the state machine
1727 * until there is nothing left to do.  Usually "nothing left to do" means
1728 * that we're waiting for a new event from the hardware.
1729 */
1730static void bcm63xx_ep0_process(struct work_struct *w)
1731{
1732	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1733	spin_lock_irq(&udc->lock);
1734	while (bcm63xx_ep0_one_round(udc) == 0)
1735		;
1736	spin_unlock_irq(&udc->lock);
1737}
1738
1739/***********************************************************************
1740 * Standard UDC gadget operations
1741 ***********************************************************************/
1742
1743/**
1744 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1745 * @gadget: USB device.
1746 */
1747static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1748{
1749	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1750
1751	return (usbd_readl(udc, USBD_STATUS_REG) &
1752		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1753}
1754
1755/**
1756 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1757 * @gadget: USB device.
1758 * @is_on: 0 to disable pullup, 1 to enable.
1759 *
1760 * See notes in bcm63xx_select_pullup().
1761 */
1762static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1763{
1764	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1765	unsigned long flags;
1766	int i, rc = -EINVAL;
1767
1768	spin_lock_irqsave(&udc->lock, flags);
1769	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1770		udc->gadget.speed = USB_SPEED_UNKNOWN;
1771		udc->ep0state = EP0_REQUEUE;
1772		bcm63xx_fifo_setup(udc);
1773		bcm63xx_fifo_reset(udc);
1774		bcm63xx_ep_setup(udc);
1775
1776		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1777		for (i = 0; i < BCM63XX_NUM_EP; i++)
1778			bcm63xx_set_stall(udc, &udc->bep[i], false);
1779
1780		bcm63xx_set_ctrl_irqs(udc, true);
1781		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1782		rc = 0;
1783	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1784		bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1785
1786		udc->ep0_req_shutdown = 1;
1787		spin_unlock_irqrestore(&udc->lock, flags);
1788
1789		while (1) {
1790			schedule_work(&udc->ep0_wq);
1791			if (udc->ep0state == EP0_SHUTDOWN)
1792				break;
1793			msleep(50);
1794		}
1795		bcm63xx_set_ctrl_irqs(udc, false);
1796		cancel_work_sync(&udc->ep0_wq);
1797		return 0;
1798	}
1799
1800	spin_unlock_irqrestore(&udc->lock, flags);
1801	return rc;
1802}
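/*
 * Usage sketch (illustrative): the UDC core reaches this operation through
 * usb_gadget_connect() and usb_gadget_disconnect():
 *
 *	usb_gadget_connect(&udc->gadget);    => bcm63xx_udc_pullup(gadget, 1)
 *	usb_gadget_disconnect(&udc->gadget); => bcm63xx_udc_pullup(gadget, 0)
 *
 * Note that the !is_on path deliberately drops udc->lock and polls until
 * the ep0 worker reaches EP0_SHUTDOWN before masking the control IRQs.
 */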
1803
1804/**
1805 * bcm63xx_udc_start - Start the controller.
1806 * @gadget: USB device.
1807 * @driver: Driver for USB device.
1808 */
1809static int bcm63xx_udc_start(struct usb_gadget *gadget,
1810		struct usb_gadget_driver *driver)
1811{
1812	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1813	unsigned long flags;
1814
1815	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1816	    !driver->setup)
1817		return -EINVAL;
1818	if (!udc)
1819		return -ENODEV;
1820	if (udc->driver)
1821		return -EBUSY;
1822
1823	spin_lock_irqsave(&udc->lock, flags);
1824
1825	set_clocks(udc, true);
1826	bcm63xx_fifo_setup(udc);
1827	bcm63xx_ep_init(udc);
1828	bcm63xx_ep_setup(udc);
1829	bcm63xx_fifo_reset(udc);
1830	bcm63xx_select_phy_mode(udc, true);
1831
1832	udc->driver = driver;
1833	udc->gadget.dev.of_node = udc->dev->of_node;
1834
1835	spin_unlock_irqrestore(&udc->lock, flags);
1836
1837	return 0;
1838}
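/*
 * Binding sketch (illustrative): ->udc_start is invoked by the UDC core
 * when a gadget driver binds, e.g. after "modprobe g_ether".  Because of
 * the max_speed check above, a full-speed-only gadget driver is rejected
 * with -EINVAL even though the controller itself can run at full speed.
 */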
1839
1840/**
1841 * bcm63xx_udc_stop - Shut down the controller.
1842 * @gadget: USB device.
1844 */
1845static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1846{
1847	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1848	unsigned long flags;
1849
1850	spin_lock_irqsave(&udc->lock, flags);
1851
1852	udc->driver = NULL;
1853
1854	/*
1855	 * If we switch the PHY too abruptly after dropping D+, the host
1856	 * will often complain:
1857	 *
1858	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1859	 */
1860	msleep(100);
1861
1862	bcm63xx_select_phy_mode(udc, false);
1863	set_clocks(udc, false);
1864
1865	spin_unlock_irqrestore(&udc->lock, flags);
1866
1867	return 0;
1868}
1869
1870static const struct usb_gadget_ops bcm63xx_udc_ops = {
1871	.get_frame	= bcm63xx_udc_get_frame,
1872	.pullup		= bcm63xx_udc_pullup,
1873	.udc_start	= bcm63xx_udc_start,
1874	.udc_stop	= bcm63xx_udc_stop,
1875};
1876
1877/***********************************************************************
1878 * IRQ handling
1879 ***********************************************************************/
1880
1881/**
1882 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1883 * @udc: Reference to the device controller.
1884 *
1885 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1886 * The driver never sees the raw control packets coming in on the ep0
1887 * IUDMA channel, but at least we get an interrupt event to tell us that
1888 * new values are waiting in the USBD_STATUS register.
1889 */
1890static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1891{
1892	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1893
1894	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1895	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1896	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1897			 USBD_STATUS_ALTINTF_SHIFT;
1898	bcm63xx_ep_setup(udc);
1899}
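/*
 * Worked example: after the host sends SET_INTERFACE with wIndex=1
 * (interface) and wValue=2 (alternate setting), the hardware acks the
 * transfer by itself; this handler then reads back USBD_STATUS and ends up
 * with udc->iface == 1 and udc->alt_iface == 2 before reprogramming the
 * per-endpoint CSRs via bcm63xx_ep_setup().
 */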
1900
1901/**
1902 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1903 * @udc: Reference to the device controller.
1904 *
1905 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
1906 * speed has changed, so that the caller can update the endpoint settings.
1907 */
1908static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1909{
1910	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1911	enum usb_device_speed oldspeed = udc->gadget.speed;
1912
1913	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1914	case BCM63XX_SPD_HIGH:
1915		udc->gadget.speed = USB_SPEED_HIGH;
1916		break;
1917	case BCM63XX_SPD_FULL:
1918		udc->gadget.speed = USB_SPEED_FULL;
1919		break;
1920	default:
1921		/* this should never happen */
1922		udc->gadget.speed = USB_SPEED_UNKNOWN;
1923		dev_err(udc->dev,
1924			"received SETUP packet with invalid link speed\n");
1925		return 0;
1926	}
1927
1928	if (udc->gadget.speed != oldspeed) {
1929		dev_info(udc->dev, "link up, %s-speed mode\n",
1930			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1931		return 1;
1932	} else {
1933		return 0;
1934	}
1935}
1936
1937/**
1938 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1939 * @udc: Reference to the device controller.
1940 * @new_status: true to "refresh" wedge status; false to clear it.
1941 *
1942 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1943 * because the controller hardware is designed to automatically clear
1944 * stalls in response to a CLEAR_FEATURE request from the host.
1945 *
1946 * On a RESET interrupt, we instead clear (un-wedge) all wedged endpoints.
1947 */
1948static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1949{
1950	int i;
1951
1952	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1953		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1954		if (!new_status)
1955			clear_bit(i, &udc->wedgemap);
1956	}
1957}
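/*
 * Illustrative flow: a gadget driver wedges an endpoint with
 * usb_ep_set_wedge(ep), which sets that endpoint's bit in udc->wedgemap.
 * Since the hardware silently un-stalls on CLEAR_FEATURE(ENDPOINT_HALT),
 * each SETUP IRQ calls this with new_status=true to re-assert the stall
 * for every bit that is still set.
 */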
1958
1959/**
1960 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1961 * @irq: IRQ number (unused).
1962 * @dev_id: Reference to the device controller.
1963 *
1964 * This is where we handle link (VBUS) down, USB reset, speed changes,
1965 * SET_CONFIGURATION, and SET_INTERFACE events.
1966 */
1967static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1968{
1969	struct bcm63xx_udc *udc = dev_id;
1970	u32 stat;
1971	bool disconnected = false, bus_reset = false;
1972
1973	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1974	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1975
1976	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1977
1978	spin_lock(&udc->lock);
1979	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1980		/* VBUS toggled */
1981
1982		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1983		      USBD_EVENTS_USB_LINK_MASK) &&
1984		      udc->gadget.speed != USB_SPEED_UNKNOWN)
1985			dev_info(udc->dev, "link down\n");
1986
1987		udc->gadget.speed = USB_SPEED_UNKNOWN;
1988		disconnected = true;
1989	}
1990	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1991		bcm63xx_fifo_setup(udc);
1992		bcm63xx_fifo_reset(udc);
1993		bcm63xx_ep_setup(udc);
1994
1995		bcm63xx_update_wedge(udc, false);
1996
1997		udc->ep0_req_reset = 1;
1998		schedule_work(&udc->ep0_wq);
1999		bus_reset = true;
2000	}
2001	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2002		if (bcm63xx_update_link_speed(udc)) {
2003			bcm63xx_fifo_setup(udc);
2004			bcm63xx_ep_setup(udc);
2005		}
2006		bcm63xx_update_wedge(udc, true);
2007	}
2008	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2009		bcm63xx_update_cfg_iface(udc);
2010		udc->ep0_req_set_cfg = 1;
2011		schedule_work(&udc->ep0_wq);
2012	}
2013	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2014		bcm63xx_update_cfg_iface(udc);
2015		udc->ep0_req_set_iface = 1;
2016		schedule_work(&udc->ep0_wq);
2017	}
2018	spin_unlock(&udc->lock);
2019
2020	if (disconnected && udc->driver)
2021		udc->driver->disconnect(&udc->gadget);
2022	else if (bus_reset && udc->driver)
2023		usb_gadget_udc_reset(&udc->gadget, udc->driver);
2024
2025	return IRQ_HANDLED;
2026}
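/*
 * Typical enumeration sequence as seen by this ISR (illustrative):
 *
 *   1. USB_LINK  - VBUS applied; speed still unknown
 *   2. USB_RESET - FIFOs/endpoints reprogrammed, wedge bits cleared
 *   3. SETUP     - link speed latched (high or full), wedges refreshed
 *   4. SETCFG    - spoofed SET_CONFIGURATION handed to the ep0 worker
 *
 * Only steps 2 and 4 wake the ep0 worker; step 3 is handled inline here.
 */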
2027
2028/**
2029 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2030 * @irq: IRQ number (unused).
2031 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2032 *
2033 * For the two ep0 channels, we have special handling that triggers the
2034 * ep0 worker thread.  For normal bulk/intr channels, either queue up
2035 * the next buffer descriptor for the transaction (incomplete transaction),
2036 * or invoke the completion callback (completed transaction).
2037 */
2038static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2039{
2040	struct iudma_ch *iudma = dev_id;
2041	struct bcm63xx_udc *udc = iudma->udc;
2042	struct bcm63xx_ep *bep;
2043	struct usb_request *req = NULL;
2044	struct bcm63xx_req *breq = NULL;
2045	int rc;
2046	bool is_done = false;
2047
2048	spin_lock(&udc->lock);
2049
2050	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2051			ENETDMAC_IR_REG, iudma->ch_idx);
2052	bep = iudma->bep;
2053	rc = iudma_read(udc, iudma);
2054
2055	/* special handling for EP0 RX (0) and TX (1) */
2056	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2057	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2058		req = udc->ep0_request;
2059		breq = our_req(req);
2060
2061		/* a single request could require multiple submissions */
2062		if (rc >= 0) {
2063			req->actual += rc;
2064
2065			if (req->actual >= req->length || breq->bd_bytes > rc) {
2066				udc->ep0_req_completed = 1;
2067				is_done = true;
2068				schedule_work(&udc->ep0_wq);
2069
2070				/* "actual" on a ZLP is 1 byte */
2071				req->actual = min(req->actual, req->length);
2072			} else {
2073				/* queue up the next BD (same request) */
2074				iudma_write(udc, iudma, breq);
2075			}
2076		}
2077	} else if (!list_empty(&bep->queue)) {
2078		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2079		req = &breq->req;
2080
2081		if (rc >= 0) {
2082			req->actual += rc;
2083
2084			if (req->actual >= req->length || breq->bd_bytes > rc) {
2085				is_done = true;
2086				list_del(&breq->queue);
2087
2088				req->actual = min(req->actual, req->length);
2089
2090				if (!list_empty(&bep->queue)) {
2091					struct bcm63xx_req *next;
2092
2093					next = list_first_entry(&bep->queue,
2094						struct bcm63xx_req, queue);
2095					iudma_write(udc, iudma, next);
2096				}
2097			} else {
2098				iudma_write(udc, iudma, breq);
2099			}
2100		}
2101	}
2102	spin_unlock(&udc->lock);
2103
2104	if (is_done) {
2105		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2106		if (req->complete)
2107			req->complete(&bep->ep, req);
2108	}
2109
2110	return IRQ_HANDLED;
2111}
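/*
 * Completion criterion, worked example: "req->actual >= req->length"
 * catches a fully transferred request, while "breq->bd_bytes > rc" catches
 * a short RX packet.  With max_pkt = 512 and irq_coalesce off, a 300-byte
 * DATAx packet lands in a 512-byte BD; iudma_read() returns rc = 300,
 * bd_bytes (512) > rc, so the request completes early instead of having
 * another BD queued.
 */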
2112
2113/***********************************************************************
2114 * Debug filesystem
2115 ***********************************************************************/
2116
2117/*
2118 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2119 * @s: seq_file to which the information will be written.
2120 * @p: Unused.
2121 *
2122 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2123 */
2124static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2125{
2126	struct bcm63xx_udc *udc = s->private;
2127
2128	if (!udc->driver)
2129		return -ENODEV;
2130
2131	seq_printf(s, "ep0 state: %s\n",
2132		   bcm63xx_ep0_state_names[udc->ep0state]);
2133	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
2134		   udc->ep0_req_reset ? "reset " : "",
2135		   udc->ep0_req_set_cfg ? "set_cfg " : "",
2136		   udc->ep0_req_set_iface ? "set_iface " : "",
2137		   udc->ep0_req_shutdown ? "shutdown " : "",
2138		   udc->ep0_request ? "pending " : "",
2139		   udc->ep0_req_completed ? "completed " : "",
2140		   udc->ep0_reply ? "reply " : "");
2141	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2142		   udc->cfg, udc->iface, udc->alt_iface);
2143	seq_printf(s, "regs:\n");
2144	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
2145		   usbd_readl(udc, USBD_CONTROL_REG),
2146		   usbd_readl(udc, USBD_STRAPS_REG),
2147		   usbd_readl(udc, USBD_STATUS_REG));
2148	seq_printf(s, "  events:  %08x; stall:  %08x\n",
2149		   usbd_readl(udc, USBD_EVENTS_REG),
2150		   usbd_readl(udc, USBD_STALL_REG));
2151
2152	return 0;
2153}
2154DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
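/*
 * Usage sketch: with CONFIG_USB_GADGET_DEBUG_FS enabled and a gadget bound,
 * the dump can be read from userspace, e.g.:
 *
 *	# cat /sys/kernel/debug/bcm63xx_udc/usbd
 *	ep0 state: IDLE
 *	  pending requests:
 *	...
 *
 * (Output shape follows the seq_printf() calls above; values are examples.)
 */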
2155
2156/*
2157 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2158 * @s: seq_file to which the information will be written.
2159 * @p: Unused.
2160 *
2161 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2162 */
2163static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2164{
2165	struct bcm63xx_udc *udc = s->private;
2166	int ch_idx, i;
2167	u32 sram2, sram3;
2168
2169	if (!udc->driver)
2170		return -ENODEV;
2171
2172	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2173		struct iudma_ch *iudma = &udc->iudma[ch_idx];
2174
2175		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2176		switch (iudma_defaults[ch_idx].ep_type) {
2177		case BCMEP_CTRL:
2178			seq_printf(s, "control");
2179			break;
2180		case BCMEP_BULK:
2181			seq_printf(s, "bulk");
2182			break;
2183		case BCMEP_INTR:
2184			seq_printf(s, "interrupt");
2185			break;
2186		}
2187		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2188		seq_printf(s, " [ep%d]:\n",
2189			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2190		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2191			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2192			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2193			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2194			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2195
2196		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2197		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2198		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2199			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2200			   sram2 >> 16, sram2 & 0xffff,
2201			   sram3 >> 16, sram3 & 0xffff,
2202			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2203		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
2204			   iudma->n_bds);
2205
2206		if (iudma->bep)
2207			seq_printf(s, "; %zu queued\n", list_count_nodes(&iudma->bep->queue));
2208		else
2209			seq_printf(s, "\n");
2210
2211		for (i = 0; i < iudma->n_bds; i++) {
2212			struct bcm_enet_desc *d = &iudma->bd_ring[i];
2213
2214			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
2215				   i * sizeof(*d), i,
2216				   d->len_stat >> 16, d->len_stat & 0xffff,
2217				   d->address);
2218			if (d == iudma->read_bd)
2219				seq_printf(s, "   <<RD");
2220			if (d == iudma->write_bd)
2221				seq_printf(s, "   <<WR");
2222			seq_printf(s, "\n");
2223		}
2224
2225		seq_printf(s, "\n");
2226	}
2227
2228	return 0;
2229}
2230DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
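/*
 * Companion usage sketch: "cat /sys/kernel/debug/bcm63xx_udc/iudma" dumps,
 * per channel, the DMAC/DMAS registers plus the software BD ring, with the
 * <<RD and <<WR markers showing where iudma_read() and iudma_write() will
 * act next.
 */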
2231
2232/**
2233 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2234 * @udc: Reference to the device controller.
2235 */
2236static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2237{
2238	struct dentry *root;
2239
2240	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2241		return;
2242
2243	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
2244	debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
2245	debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
2246}
2247
2248/**
2249 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2250 * @udc: Reference to the device controller.
2251 *
2252 * debugfs_lookup_and_remove() is a no-op if the entry was never created.
2253 */
2254static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2255{
2256	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
2257}
2258
2259/***********************************************************************
2260 * Driver init/exit
2261 ***********************************************************************/
2262
2263/**
2264 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2265 * @pdev: Platform device struct from the bcm63xx BSP code.
2266 *
2267 * Note that platform data is required, because pd.port_no varies from chip
2268 * to chip and is used to switch the correct USB port to device mode.
2269 */
2270static int bcm63xx_udc_probe(struct platform_device *pdev)
2271{
2272	struct device *dev = &pdev->dev;
2273	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2274	struct bcm63xx_udc *udc;
2275	int rc = -ENOMEM, i, irq;
2276
2277	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2278	if (!udc)
2279		return -ENOMEM;
2280
2281	platform_set_drvdata(pdev, udc);
2282	udc->dev = dev;
2283	udc->pd = pd;
2284
2285	if (!pd) {
2286		dev_err(dev, "missing platform data\n");
2287		return -EINVAL;
2288	}
2289
2290	udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
2291	if (IS_ERR(udc->usbd_regs))
2292		return PTR_ERR(udc->usbd_regs);
2293
2294	udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
2295	if (IS_ERR(udc->iudma_regs))
2296		return PTR_ERR(udc->iudma_regs);
2297
2298	spin_lock_init(&udc->lock);
2299	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2300
2301	udc->gadget.ops = &bcm63xx_udc_ops;
2302	udc->gadget.name = dev_name(dev);
2303
2304	if (!pd->use_fullspeed && !use_fullspeed)
2305		udc->gadget.max_speed = USB_SPEED_HIGH;
2306	else
2307		udc->gadget.max_speed = USB_SPEED_FULL;
2308
2309	/* request clocks, allocate buffers, and clear any pending IRQs */
2310	rc = bcm63xx_init_udc_hw(udc);
2311	if (rc)
2312		return rc;
2313
2314	rc = -ENXIO;
2315
2316	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2317	irq = platform_get_irq(pdev, 0);
2318	if (irq < 0) {
2319		rc = irq;
2320		goto out_uninit;
2321	}
2322	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2323			     dev_name(dev), udc) < 0)
2324		goto report_request_failure;
2325
2326	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2327	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2328		irq = platform_get_irq(pdev, i + 1);
2329		if (irq < 0) {
2330			rc = irq;
2331			goto out_uninit;
2332		}
2333		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2334				     dev_name(dev), &udc->iudma[i]) < 0)
2335			goto report_request_failure;
2336	}
2337
2338	bcm63xx_udc_init_debugfs(udc);
2339	rc = usb_add_gadget_udc(dev, &udc->gadget);
2340	if (!rc)
2341		return 0;
2342
2343	bcm63xx_udc_cleanup_debugfs(udc);
2344out_uninit:
2345	bcm63xx_uninit_udc_hw(udc);
2346	return rc;
2347
2348report_request_failure:
2349	dev_err(dev, "error requesting IRQ #%d\n", irq);
2350	goto out_uninit;
2351}
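/*
 * Board-side sketch (hedged; assumes the bcm63xx_usbd_register() helper
 * declared in bcm63xx_dev_usb_usbd.h): the platform data that probe()
 * insists on is supplied by BSP code roughly like this:
 *
 *	static struct bcm63xx_usbd_platform_data usbd_pd = {
 *		.port_no = 0,	// example port
 *	};
 *
 *	bcm63xx_usbd_register(&usbd_pd);
 */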
2352
2353/**
2354 * bcm63xx_udc_remove - Remove the device from the system.
2355 * @pdev: Platform device struct from the bcm63xx BSP code.
2356 */
2357static void bcm63xx_udc_remove(struct platform_device *pdev)
2358{
2359	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2360
2361	bcm63xx_udc_cleanup_debugfs(udc);
2362	usb_del_gadget_udc(&udc->gadget);
2363	BUG_ON(udc->driver);
2364
2365	bcm63xx_uninit_udc_hw(udc);
2366}
2367
2368static struct platform_driver bcm63xx_udc_driver = {
2369	.probe		= bcm63xx_udc_probe,
2370	.remove_new	= bcm63xx_udc_remove,
2371	.driver		= {
2372		.name	= DRV_MODULE_NAME,
2373	},
2374};
2375module_platform_driver(bcm63xx_udc_driver);
2376
2377MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2378MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2379MODULE_LICENSE("GPL");
2380MODULE_ALIAS("platform:" DRV_MODULE_NAME);
v4.6
 
   1/*
   2 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
   3 *
   4 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
   5 * Copyright (C) 2012 Broadcom Corporation
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 */
  12
  13#include <linux/bitops.h>
  14#include <linux/bug.h>
  15#include <linux/clk.h>
  16#include <linux/compiler.h>
  17#include <linux/debugfs.h>
  18#include <linux/delay.h>
  19#include <linux/device.h>
  20#include <linux/dma-mapping.h>
  21#include <linux/errno.h>
  22#include <linux/interrupt.h>
  23#include <linux/ioport.h>
  24#include <linux/kconfig.h>
  25#include <linux/kernel.h>
  26#include <linux/list.h>
  27#include <linux/module.h>
  28#include <linux/moduleparam.h>
  29#include <linux/platform_device.h>
  30#include <linux/sched.h>
  31#include <linux/seq_file.h>
  32#include <linux/slab.h>
  33#include <linux/timer.h>
 
  34#include <linux/usb/ch9.h>
  35#include <linux/usb/gadget.h>
  36#include <linux/workqueue.h>
  37
  38#include <bcm63xx_cpu.h>
  39#include <bcm63xx_iudma.h>
  40#include <bcm63xx_dev_usb_usbd.h>
  41#include <bcm63xx_io.h>
  42#include <bcm63xx_regs.h>
  43
  44#define DRV_MODULE_NAME		"bcm63xx_udc"
  45
  46static const char bcm63xx_ep0name[] = "ep0";
  47
  48static const struct {
  49	const char *name;
  50	const struct usb_ep_caps caps;
  51} bcm63xx_ep_info[] = {
  52#define EP_INFO(_name, _caps) \
  53	{ \
  54		.name = _name, \
  55		.caps = _caps, \
  56	}
  57
  58	EP_INFO(bcm63xx_ep0name,
  59		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
  60	EP_INFO("ep1in-bulk",
  61		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
  62	EP_INFO("ep2out-bulk",
  63		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
  64	EP_INFO("ep3in-int",
  65		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
  66	EP_INFO("ep4out-int",
  67		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
  68
  69#undef EP_INFO
  70};
  71
  72static bool use_fullspeed;
  73module_param(use_fullspeed, bool, S_IRUGO);
  74MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
  75
  76/*
  77 * RX IRQ coalescing options:
  78 *
  79 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
  80 * driver is able to pass the "testusb" suite and recover from conditions like:
  81 *
  82 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
  83 *   2) Host sends 512 bytes of data
  84 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
  85 *   4) Device shuts down the endpoint and cancels the RX transaction
  86 *
  87 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
  88 * considerably fewer IRQs, but error recovery is less robust.  Does not
  89 * reliably pass "testusb".
  90 *
  91 * TX always uses coalescing, because we can cancel partially complete TX
  92 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
  93 * this on RX.
  94 */
  95static bool irq_coalesce;
  96module_param(irq_coalesce, bool, S_IRUGO);
  97MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
  98
  99#define BCM63XX_NUM_EP			5
 100#define BCM63XX_NUM_IUDMA		6
 101#define BCM63XX_NUM_FIFO_PAIRS		3
 102
 103#define IUDMA_RESET_TIMEOUT_US		10000
 104
 105#define IUDMA_EP0_RXCHAN		0
 106#define IUDMA_EP0_TXCHAN		1
 107
 108#define IUDMA_MAX_FRAGMENT		2048
 109#define BCM63XX_MAX_CTRL_PKT		64
 110
 111#define BCMEP_CTRL			0x00
 112#define BCMEP_ISOC			0x01
 113#define BCMEP_BULK			0x02
 114#define BCMEP_INTR			0x03
 115
 116#define BCMEP_OUT			0x00
 117#define BCMEP_IN			0x01
 118
 119#define BCM63XX_SPD_FULL		1
 120#define BCM63XX_SPD_HIGH		0
 121
 122#define IUDMA_DMAC_OFFSET		0x200
 123#define IUDMA_DMAS_OFFSET		0x400
 124
 125enum bcm63xx_ep0_state {
 126	EP0_REQUEUE,
 127	EP0_IDLE,
 128	EP0_IN_DATA_PHASE_SETUP,
 129	EP0_IN_DATA_PHASE_COMPLETE,
 130	EP0_OUT_DATA_PHASE_SETUP,
 131	EP0_OUT_DATA_PHASE_COMPLETE,
 132	EP0_OUT_STATUS_PHASE,
 133	EP0_IN_FAKE_STATUS_PHASE,
 134	EP0_SHUTDOWN,
 135};
 136
 137static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
 138	"REQUEUE",
 139	"IDLE",
 140	"IN_DATA_PHASE_SETUP",
 141	"IN_DATA_PHASE_COMPLETE",
 142	"OUT_DATA_PHASE_SETUP",
 143	"OUT_DATA_PHASE_COMPLETE",
 144	"OUT_STATUS_PHASE",
 145	"IN_FAKE_STATUS_PHASE",
 146	"SHUTDOWN",
 147};
 148
 149/**
 150 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 151 * @ep_num: USB endpoint number.
 152 * @n_bds: Number of buffer descriptors in the ring.
 153 * @ep_type: Endpoint type (control, bulk, interrupt).
 154 * @dir: Direction (in, out).
 155 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 156 * @max_pkt_hs: Maximum packet size in high speed mode.
 157 * @max_pkt_fs: Maximum packet size in full speed mode.
 158 */
 159struct iudma_ch_cfg {
 160	int				ep_num;
 161	int				n_bds;
 162	int				ep_type;
 163	int				dir;
 164	int				n_fifo_slots;
 165	int				max_pkt_hs;
 166	int				max_pkt_fs;
 167};
 168
 169static const struct iudma_ch_cfg iudma_defaults[] = {
 170
 171	/* This controller was designed to support a CDC/RNDIS application.
 172	   It may be possible to reconfigure some of the endpoints, but
 173	   the hardware limitations (FIFO sizing and number of DMA channels)
 174	   may significantly impact flexibility and/or stability.  Change
 175	   these values at your own risk.
 176
 177	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
 178	idx      |  n_bds     |         dir       |  max_pkt_hs  |
 179	 |       |    |       |          |        |      |       |       */
 180	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
 181	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
 182	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
 183	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
 184	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
 185	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
 186};
 187
 188struct bcm63xx_udc;
 189
 190/**
 191 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 192 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 193 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 194 * @enabled: Whether bcm63xx_ep_enable() has been called.
 195 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 196 * @is_tx: true for TX, false for RX.
 197 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 198 * @udc: Reference to the device controller.
 199 * @read_bd: Next buffer descriptor to reap from the hardware.
 200 * @write_bd: Next BD available for a new packet.
 201 * @end_bd: Points to the final BD in the ring.
 202 * @n_bds_used: Number of BD entries currently occupied.
 203 * @bd_ring: Base pointer to the BD ring.
 204 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 205 * @n_bds: Total number of BDs in the ring.
 206 *
 207 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 208 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 209 * only.
 210 *
 211 * Each bulk/intr endpoint has a single IUDMA channel and a single
 212 * struct usb_ep.
 213 */
 214struct iudma_ch {
 215	unsigned int			ch_idx;
 216	int				ep_num;
 217	bool				enabled;
 218	int				max_pkt;
 219	bool				is_tx;
 220	struct bcm63xx_ep		*bep;
 221	struct bcm63xx_udc		*udc;
 222
 223	struct bcm_enet_desc		*read_bd;
 224	struct bcm_enet_desc		*write_bd;
 225	struct bcm_enet_desc		*end_bd;
 226	int				n_bds_used;
 227
 228	struct bcm_enet_desc		*bd_ring;
 229	dma_addr_t			bd_ring_dma;
 230	unsigned int			n_bds;
 231};
 232
 233/**
 234 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 235 * @ep_num: USB endpoint number.
 236 * @iudma: Pointer to IUDMA channel state.
 237 * @ep: USB gadget layer representation of the EP.
 238 * @udc: Reference to the device controller.
 239 * @queue: Linked list of outstanding requests for this EP.
 240 * @halted: 1 if the EP is stalled; 0 otherwise.
 241 */
 242struct bcm63xx_ep {
 243	unsigned int			ep_num;
 244	struct iudma_ch			*iudma;
 245	struct usb_ep			ep;
 246	struct bcm63xx_udc		*udc;
 247	struct list_head		queue;
 248	unsigned			halted:1;
 249};
 250
 251/**
 252 * struct bcm63xx_req - Internal (driver) state of a single request.
 253 * @queue: Links back to the EP's request list.
 254 * @req: USB gadget layer representation of the request.
 255 * @offset: Current byte offset into the data buffer (next byte to queue).
 256 * @bd_bytes: Number of data bytes in outstanding BD entries.
 257 * @iudma: IUDMA channel used for the request.
 258 */
 259struct bcm63xx_req {
 260	struct list_head		queue;		/* ep's requests */
 261	struct usb_request		req;
 262	unsigned int			offset;
 263	unsigned int			bd_bytes;
 264	struct iudma_ch			*iudma;
 265};
 266
 267/**
 268 * struct bcm63xx_udc - Driver/hardware private context.
 269 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 270 * @dev: Generic Linux device structure.
 271 * @pd: Platform data (board/port info).
 272 * @usbd_clk: Clock descriptor for the USB device block.
 273 * @usbh_clk: Clock descriptor for the USB host block.
 274 * @gadget: USB slave device.
 275 * @driver: Driver for USB slave devices.
 276 * @usbd_regs: Base address of the USBD/USB20D block.
 277 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 278 * @bep: Array of endpoints, including ep0.
 279 * @iudma: Array of all IUDMA channels used by this controller.
 280 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 281 * @iface: USB interface number, from SET_INTERFACE wIndex.
 282 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 283 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 284 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 285 * @ep0state: Current state of the ep0 state machine.
 286 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 287 * @wedgemap: Bitmap of wedged endpoints.
 288 * @ep0_req_reset: USB reset is pending.
 289 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 290 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 291 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 292 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 293 * @ep0_reply: Pending reply from gadget driver.
 294 * @ep0_request: Outstanding ep0 request.
 295 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 296 * @debugfs_usbd: debugfs file "usbd" for controller state.
 297 * @debugfs_iudma: debugfs file "usbd" for IUDMA state.
 298 */
 299struct bcm63xx_udc {
 300	spinlock_t			lock;
 301
 302	struct device			*dev;
 303	struct bcm63xx_usbd_platform_data *pd;
 304	struct clk			*usbd_clk;
 305	struct clk			*usbh_clk;
 306
 307	struct usb_gadget		gadget;
 308	struct usb_gadget_driver	*driver;
 309
 310	void __iomem			*usbd_regs;
 311	void __iomem			*iudma_regs;
 312
 313	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
 314	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];
 315
 316	int				cfg;
 317	int				iface;
 318	int				alt_iface;
 319
 320	struct bcm63xx_req		ep0_ctrl_req;
 321	u8				*ep0_ctrl_buf;
 322
 323	int				ep0state;
 324	struct work_struct		ep0_wq;
 325
 326	unsigned long			wedgemap;
 327
 328	unsigned			ep0_req_reset:1;
 329	unsigned			ep0_req_set_cfg:1;
 330	unsigned			ep0_req_set_iface:1;
 331	unsigned			ep0_req_shutdown:1;
 332
 333	unsigned			ep0_req_completed:1;
 334	struct usb_request		*ep0_reply;
 335	struct usb_request		*ep0_request;
 336
 337	struct dentry			*debugfs_root;
 338	struct dentry			*debugfs_usbd;
 339	struct dentry			*debugfs_iudma;
 340};
 341
 342static const struct usb_ep_ops bcm63xx_udc_ep_ops;
 343
 344/***********************************************************************
 345 * Convenience functions
 346 ***********************************************************************/
 347
 348static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
 349{
 350	return container_of(g, struct bcm63xx_udc, gadget);
 351}
 352
 353static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
 354{
 355	return container_of(ep, struct bcm63xx_ep, ep);
 356}
 357
 358static inline struct bcm63xx_req *our_req(struct usb_request *req)
 359{
 360	return container_of(req, struct bcm63xx_req, req);
 361}
 362
 363static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
 364{
 365	return bcm_readl(udc->usbd_regs + off);
 366}
 367
 368static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
 369{
 370	bcm_writel(val, udc->usbd_regs + off);
 371}
 372
 373static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
 374{
 375	return bcm_readl(udc->iudma_regs + off);
 376}
 377
 378static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
 379{
 380	bcm_writel(val, udc->iudma_regs + off);
 381}
 382
 383static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
 384{
 385	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
 386			(ENETDMA_CHAN_WIDTH * chan));
 387}
 388
 389static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
 390					int chan)
 391{
 392	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
 393			(ENETDMA_CHAN_WIDTH * chan));
 394}
 395
 396static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
 397{
 398	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
 399			(ENETDMA_CHAN_WIDTH * chan));
 400}
 401
 402static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
 403					int chan)
 404{
 405	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
 406			(ENETDMA_CHAN_WIDTH * chan));
 407}
 408
 409static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
 410{
 411	if (is_enabled) {
 412		clk_enable(udc->usbh_clk);
 413		clk_enable(udc->usbd_clk);
 414		udelay(10);
 415	} else {
 416		clk_disable(udc->usbd_clk);
 417		clk_disable(udc->usbh_clk);
 418	}
 419}
 420
 421/***********************************************************************
 422 * Low-level IUDMA / FIFO operations
 423 ***********************************************************************/
 424
 425/**
 426 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 427 * @udc: Reference to the device controller.
 428 * @idx: Desired init_sel value.
 429 *
 430 * The "init_sel" signal is used as a selection index for both endpoints
 431 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 432 * depends on the context.
 433 */
 434static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
 435{
 436	u32 val = usbd_readl(udc, USBD_CONTROL_REG);
 437
 438	val &= ~USBD_CONTROL_INIT_SEL_MASK;
 439	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
 440	usbd_writel(udc, val, USBD_CONTROL_REG);
 441}
 442
 443/**
 444 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 445 * @udc: Reference to the device controller.
 446 * @bep: Endpoint on which to operate.
 447 * @is_stalled: true to enable stall, false to disable.
 448 *
 449 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 450 * halt/stall conditions.
 451 */
 452static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
 453	bool is_stalled)
 454{
 455	u32 val;
 456
 457	val = USBD_STALL_UPDATE_MASK |
 458		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
 459		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
 460	usbd_writel(udc, val, USBD_STALL_REG);
 461}
 462
 463/**
 464 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 465 * @udc: Reference to the device controller.
 466 *
 467 * These parameters depend on the USB link speed.  Settings are
 468 * per-IUDMA-channel-pair.
 469 */
 470static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
 471{
 472	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
 473	u32 i, val, rx_fifo_slot, tx_fifo_slot;
 474
 475	/* set up FIFO boundaries and packet sizes; this is done in pairs */
 476	rx_fifo_slot = tx_fifo_slot = 0;
 477	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
 478		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
 479		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
 480
 481		bcm63xx_ep_dma_select(udc, i >> 1);
 482
 483		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
 484			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
 485			 USBD_RXFIFO_CONFIG_END_SHIFT);
 486		rx_fifo_slot += rx_cfg->n_fifo_slots;
 487		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
 488		usbd_writel(udc,
 489			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
 490			    USBD_RXFIFO_EPSIZE_REG);
 491
 492		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
 493			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
 494			 USBD_TXFIFO_CONFIG_END_SHIFT);
 495		tx_fifo_slot += tx_cfg->n_fifo_slots;
 496		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
 497		usbd_writel(udc,
 498			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
 499			    USBD_TXFIFO_EPSIZE_REG);
 500
 501		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
 502	}
 503}
 504
 505/**
 506 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 507 * @udc: Reference to the device controller.
 508 * @ep_num: Endpoint number.
 509 */
 510static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
 511{
 512	u32 val;
 513
 514	bcm63xx_ep_dma_select(udc, ep_num);
 515
 516	val = usbd_readl(udc, USBD_CONTROL_REG);
 517	val |= USBD_CONTROL_FIFO_RESET_MASK;
 518	usbd_writel(udc, val, USBD_CONTROL_REG);
 519	usbd_readl(udc, USBD_CONTROL_REG);
 520}
 521
 522/**
 523 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 524 * @udc: Reference to the device controller.
 525 */
 526static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
 527{
 528	int i;
 529
 530	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
 531		bcm63xx_fifo_reset_ep(udc, i);
 532}
 533
 534/**
 535 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 536 * @udc: Reference to the device controller.
 537 */
 538static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
 539{
 540	u32 i, val;
 541
 542	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
 543		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
 544
 545		if (cfg->ep_num < 0)
 546			continue;
 547
 548		bcm63xx_ep_dma_select(udc, cfg->ep_num);
 549		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
 550			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
 551		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
 552	}
 553}
 554
 555/**
 556 * bcm63xx_ep_setup - Configure per-endpoint settings.
 557 * @udc: Reference to the device controller.
 558 *
 559 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 560 */
 561static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
 562{
 563	u32 val, i;
 564
 565	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
 566
 567	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
 568		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
 569		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
 570			      cfg->max_pkt_hs : cfg->max_pkt_fs;
 571		int idx = cfg->ep_num;
 572
 573		udc->iudma[i].max_pkt = max_pkt;
 574
 575		if (idx < 0)
 576			continue;
 577		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
 578
 579		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
 580		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
 581		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
 582		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
 583		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
 584		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
 585		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
 586		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
 587	}
 588}
 589
 590/**
 591 * iudma_write - Queue a single IUDMA transaction.
 592 * @udc: Reference to the device controller.
 593 * @iudma: IUDMA channel to use.
 594 * @breq: Request containing the transaction data.
 595 *
 596 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 597 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 598 * So iudma_write() may be called several times to fulfill a single
 599 * usb_request.
 600 *
 601 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 602 */
 603static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
 604	struct bcm63xx_req *breq)
 605{
 606	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
 607	unsigned int bytes_left = breq->req.length - breq->offset;
 608	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
 609		iudma->max_pkt : IUDMA_MAX_FRAGMENT;
 610
 611	iudma->n_bds_used = 0;
 612	breq->bd_bytes = 0;
 613	breq->iudma = iudma;
 614
 615	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
 616		extra_zero_pkt = 1;
 617
 618	do {
 619		struct bcm_enet_desc *d = iudma->write_bd;
 620		u32 dmaflags = 0;
 621		unsigned int n_bytes;
 622
 623		if (d == iudma->end_bd) {
 624			dmaflags |= DMADESC_WRAP_MASK;
 625			iudma->write_bd = iudma->bd_ring;
 626		} else {
 627			iudma->write_bd++;
 628		}
 629		iudma->n_bds_used++;
 630
 631		n_bytes = min_t(int, bytes_left, max_bd_bytes);
 632		if (n_bytes)
 633			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
 634		else
 635			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
 636				    DMADESC_USB_ZERO_MASK;
 637
 638		dmaflags |= DMADESC_OWNER_MASK;
 639		if (first_bd) {
 640			dmaflags |= DMADESC_SOP_MASK;
 641			first_bd = 0;
 642		}
 643
 644		/*
 645		 * extra_zero_pkt forces one more iteration through the loop
 646		 * after all data is queued up, to send the zero packet
 647		 */
 648		if (extra_zero_pkt && !bytes_left)
 649			extra_zero_pkt = 0;
 650
 651		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
 652		    (n_bytes == bytes_left && !extra_zero_pkt)) {
 653			last_bd = 1;
 654			dmaflags |= DMADESC_EOP_MASK;
 655		}
 656
 657		d->address = breq->req.dma + breq->offset;
 658		mb();
 659		d->len_stat = dmaflags;
 660
 661		breq->offset += n_bytes;
 662		breq->bd_bytes += n_bytes;
 663		bytes_left -= n_bytes;
 664	} while (!last_bd);
 665
 666	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
 667			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
 668}
 669
 670/**
 671 * iudma_read - Check for IUDMA buffer completion.
 672 * @udc: Reference to the device controller.
 673 * @iudma: IUDMA channel to use.
 674 *
 675 * This checks to see if ALL of the outstanding BDs on the DMA channel
 676 * have been filled.  If so, it returns the actual transfer length;
 677 * otherwise it returns -EBUSY.
 678 */
 679static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
 680{
 681	int i, actual_len = 0;
 682	struct bcm_enet_desc *d = iudma->read_bd;
 683
 684	if (!iudma->n_bds_used)
 685		return -EINVAL;
 686
 687	for (i = 0; i < iudma->n_bds_used; i++) {
 688		u32 dmaflags;
 689
 690		dmaflags = d->len_stat;
 691
 692		if (dmaflags & DMADESC_OWNER_MASK)
 693			return -EBUSY;
 694
 695		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
 696			      DMADESC_LENGTH_SHIFT;
 697		if (d == iudma->end_bd)
 698			d = iudma->bd_ring;
 699		else
 700			d++;
 701	}
 702
 703	iudma->read_bd = d;
 704	iudma->n_bds_used = 0;
 705	return actual_len;
 706}
 707
 708/**
 709 * iudma_reset_channel - Stop DMA on a single channel.
 710 * @udc: Reference to the device controller.
 711 * @iudma: IUDMA channel to reset.
 712 */
 713static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
 714{
 715	int timeout = IUDMA_RESET_TIMEOUT_US;
 716	struct bcm_enet_desc *d;
 717	int ch_idx = iudma->ch_idx;
 718
 719	if (!iudma->is_tx)
 720		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
 721
 722	/* stop DMA, then wait for the hardware to wrap up */
 723	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
 724
 725	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
 726				   ENETDMAC_CHANCFG_EN_MASK) {
 727		udelay(1);
 728
 729		/* repeatedly flush the FIFO data until the BD completes */
 730		if (iudma->is_tx && iudma->ep_num >= 0)
 731			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
 732
 733		if (!timeout--) {
 734			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
 735				ch_idx);
 736			break;
 737		}
 738		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
 739			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
 740				 ch_idx);
 741			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
 742					ENETDMAC_CHANCFG_REG, ch_idx);
 743		}
 744	}
 745	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
 746
 747	/* don't leave "live" HW-owned entries for the next guy to step on */
 748	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
 749		d->len_stat = 0;
 750	mb();
 751
 752	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
 753	iudma->n_bds_used = 0;
 754
 755	/* set up IRQs, UBUS burst size, and BD base for this channel */
 756	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
 757			ENETDMAC_IRMASK_REG, ch_idx);
 758	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
 759
 760	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
 761	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
 762}
 763
 764/**
 765 * iudma_init_channel - One-time IUDMA channel initialization.
 766 * @udc: Reference to the device controller.
 767 * @ch_idx: Channel to initialize.
 768 */
 769static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
 770{
 771	struct iudma_ch *iudma = &udc->iudma[ch_idx];
 772	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
 773	unsigned int n_bds = cfg->n_bds;
 774	struct bcm63xx_ep *bep = NULL;
 775
 776	iudma->ep_num = cfg->ep_num;
 777	iudma->ch_idx = ch_idx;
 778	iudma->is_tx = !!(ch_idx & 0x01);
 779	if (iudma->ep_num >= 0) {
 780		bep = &udc->bep[iudma->ep_num];
 781		bep->iudma = iudma;
 782		INIT_LIST_HEAD(&bep->queue);
 783	}
 784
 785	iudma->bep = bep;
 786	iudma->udc = udc;
 787
 788	/* ep0 is always active; others are controlled by the gadget driver */
 789	if (iudma->ep_num <= 0)
 790		iudma->enabled = true;
 791
 792	iudma->n_bds = n_bds;
 793	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
 794		n_bds * sizeof(struct bcm_enet_desc),
 795		&iudma->bd_ring_dma, GFP_KERNEL);
 796	if (!iudma->bd_ring)
 797		return -ENOMEM;
 798	iudma->end_bd = &iudma->bd_ring[n_bds - 1];
 799
 800	return 0;
 801}
 802
 803/**
 804 * iudma_init - One-time initialization of all IUDMA channels.
 805 * @udc: Reference to the device controller.
 806 *
 807 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 808 */
 809static int iudma_init(struct bcm63xx_udc *udc)
 810{
 811	int i, rc;
 812
 813	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
 814
 815	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
 816		rc = iudma_init_channel(udc, i);
 817		if (rc)
 818			return rc;
 819		iudma_reset_channel(udc, &udc->iudma[i]);
 820	}
 821
 822	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
 823	return 0;
 824}
 825
 826/**
 827 * iudma_uninit - Uninitialize IUDMA channels.
 828 * @udc: Reference to the device controller.
 829 *
 830 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 831 */
 832static void iudma_uninit(struct bcm63xx_udc *udc)
 833{
 834	int i;
 835
 836	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
 837
 838	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
 839		iudma_reset_channel(udc, &udc->iudma[i]);
 840
 841	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
 842}
 843
 844/***********************************************************************
 845 * Other low-level USBD operations
 846 ***********************************************************************/
 847
 848/**
 849 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 850 * @udc: Reference to the device controller.
 851 * @enable_irqs: true to enable, false to disable.
 852 */
 853static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
 854{
 855	u32 val;
 856
 857	usbd_writel(udc, 0, USBD_STATUS_REG);
 858
 859	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
 860	      BIT(USBD_EVENT_IRQ_SETUP) |
 861	      BIT(USBD_EVENT_IRQ_SETCFG) |
 862	      BIT(USBD_EVENT_IRQ_SETINTF) |
 863	      BIT(USBD_EVENT_IRQ_USB_LINK);
 864	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
 865	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
 866}
 867
 868/**
 869 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 870 * @udc: Reference to the device controller.
 871 * @is_device: true for device, false for host.
 872 *
 873 * This should probably be reworked to use the drivers/usb/otg
 874 * infrastructure.
 875 *
 876 * By default, the AFE/pullups are disabled in device mode, until
 877 * bcm63xx_select_pullup() is called.
 878 */
 879static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
 880{
 881	u32 val, portmask = BIT(udc->pd->port_no);
 882
 883	if (BCMCPU_IS_6328()) {
 884		/* configure pinmux to sense VBUS signal */
 885		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
 886		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
 887		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
 888			       GPIO_PINMUX_OTHR_6328_USB_HOST;
 889		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
 890	}
 891
 892	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
 893	if (is_device) {
 894		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
 895		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 896	} else {
 897		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
 898		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 899	}
 900	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
 901
 902	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
 903	if (is_device)
 904		val |= USBH_PRIV_SWAP_USBD_MASK;
 905	else
 906		val &= ~USBH_PRIV_SWAP_USBD_MASK;
 907	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
 908}
 909
 910/**
 911 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 912 * @udc: Reference to the device controller.
 913 * @is_on: true to enable the pullup, false to disable.
 914 *
 915 * If the pullup is active, the host will sense a FS/HS device connected to
 916 * the port.  If the pullup is inactive, the host will think the USB
 917 * device has been disconnected.
 918 */
 919static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
 920{
 921	u32 val, portmask = BIT(udc->pd->port_no);
 922
 923	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
 924	if (is_on)
 925		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 926	else
 927		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
 928	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
 929}
 930
 931/**
 932 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 933 * @udc: Reference to the device controller.
 934 *
 935 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 936 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 937 */
 938static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
 939{
 940	set_clocks(udc, true);
 941	iudma_uninit(udc);
 942	set_clocks(udc, false);
 943
 944	clk_put(udc->usbd_clk);
 945	clk_put(udc->usbh_clk);
 946}
 947
 948/**
 949 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 950 * @udc: Reference to the device controller.
 951 */
 952static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
 953{
 954	int i, rc = 0;
 955	u32 val;
 956
 957	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
 958					 GFP_KERNEL);
 959	if (!udc->ep0_ctrl_buf)
 960		return -ENOMEM;
 961
 962	INIT_LIST_HEAD(&udc->gadget.ep_list);
 963	for (i = 0; i < BCM63XX_NUM_EP; i++) {
 964		struct bcm63xx_ep *bep = &udc->bep[i];
 965
 966		bep->ep.name = bcm63xx_ep_info[i].name;
 967		bep->ep.caps = bcm63xx_ep_info[i].caps;
 968		bep->ep_num = i;
 969		bep->ep.ops = &bcm63xx_udc_ep_ops;
 970		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
 971		bep->halted = 0;
 972		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
 973		bep->udc = udc;
 974		bep->ep.desc = NULL;
 975		INIT_LIST_HEAD(&bep->queue);
 976	}
 977
 978	udc->gadget.ep0 = &udc->bep[0].ep;
 979	list_del(&udc->bep[0].ep.ep_list);
 980
 981	udc->gadget.speed = USB_SPEED_UNKNOWN;
 982	udc->ep0state = EP0_SHUTDOWN;
 983
 984	udc->usbh_clk = clk_get(udc->dev, "usbh");
 985	if (IS_ERR(udc->usbh_clk))
 986		return -EIO;
 987
 988	udc->usbd_clk = clk_get(udc->dev, "usbd");
 989	if (IS_ERR(udc->usbd_clk)) {
 990		clk_put(udc->usbh_clk);
 991		return -EIO;
 992	}
 993
 994	set_clocks(udc, true);
 995
 996	val = USBD_CONTROL_AUTO_CSRS_MASK |
 997	      USBD_CONTROL_DONE_CSRS_MASK |
 998	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
 999	usbd_writel(udc, val, USBD_CONTROL_REG);
1000
1001	val = USBD_STRAPS_APP_SELF_PWR_MASK |
1002	      USBD_STRAPS_APP_RAM_IF_MASK |
1003	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
1004	      USBD_STRAPS_APP_8BITPHY_MASK |
1005	      USBD_STRAPS_APP_RMTWKUP_MASK;
1006
1007	if (udc->gadget.max_speed == USB_SPEED_HIGH)
1008		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1009	else
1010		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1011	usbd_writel(udc, val, USBD_STRAPS_REG);
1012
1013	bcm63xx_set_ctrl_irqs(udc, false);
1014
1015	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1016
1017	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1018	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1019	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1020
1021	rc = iudma_init(udc);
1022	set_clocks(udc, false);
1023	if (rc)
1024		bcm63xx_uninit_udc_hw(udc);
1025
1026	return 0;
1027}
1028
1029/***********************************************************************
1030 * Standard EP gadget operations
1031 ***********************************************************************/
1032
1033/**
1034 * bcm63xx_ep_enable - Enable one endpoint.
1035 * @ep: Endpoint to enable.
1036 * @desc: Contains max packet, direction, etc.
1037 *
1038 * Most of the endpoint parameters are fixed in this controller, so there
1039 * isn't much for this function to do.
1040 */
1041static int bcm63xx_ep_enable(struct usb_ep *ep,
1042	const struct usb_endpoint_descriptor *desc)
1043{
1044	struct bcm63xx_ep *bep = our_ep(ep);
1045	struct bcm63xx_udc *udc = bep->udc;
1046	struct iudma_ch *iudma = bep->iudma;
1047	unsigned long flags;
1048
1049	if (!ep || !desc || ep->name == bcm63xx_ep0name)
1050		return -EINVAL;
1051
1052	if (!udc->driver)
1053		return -ESHUTDOWN;
1054
1055	spin_lock_irqsave(&udc->lock, flags);
1056	if (iudma->enabled) {
1057		spin_unlock_irqrestore(&udc->lock, flags);
1058		return -EINVAL;
1059	}
1060
1061	iudma->enabled = true;
1062	BUG_ON(!list_empty(&bep->queue));
1063
1064	iudma_reset_channel(udc, iudma);
1065
1066	bep->halted = 0;
1067	bcm63xx_set_stall(udc, bep, false);
1068	clear_bit(bep->ep_num, &udc->wedgemap);
1069
1070	ep->desc = desc;
1071	ep->maxpacket = usb_endpoint_maxp(desc);
1072
1073	spin_unlock_irqrestore(&udc->lock, flags);
1074	return 0;
1075}
1076
1077/**
1078 * bcm63xx_ep_disable - Disable one endpoint.
1079 * @ep: Endpoint to disable.
1080 */
1081static int bcm63xx_ep_disable(struct usb_ep *ep)
1082{
1083	struct bcm63xx_ep *bep = our_ep(ep);
1084	struct bcm63xx_udc *udc = bep->udc;
1085	struct iudma_ch *iudma = bep->iudma;
1086	struct bcm63xx_req *breq, *n;
1087	unsigned long flags;
1088
1089	if (!ep || !ep->desc)
1090		return -EINVAL;
1091
1092	spin_lock_irqsave(&udc->lock, flags);
1093	if (!iudma->enabled) {
1094		spin_unlock_irqrestore(&udc->lock, flags);
1095		return -EINVAL;
1096	}
1097	iudma->enabled = false;
1098
1099	iudma_reset_channel(udc, iudma);
1100
1101	if (!list_empty(&bep->queue)) {
1102		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1103			usb_gadget_unmap_request(&udc->gadget, &breq->req,
1104						 iudma->is_tx);
1105			list_del(&breq->queue);
1106			breq->req.status = -ESHUTDOWN;
1107
1108			spin_unlock_irqrestore(&udc->lock, flags);
1109			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1110			spin_lock_irqsave(&udc->lock, flags);
1111		}
1112	}
1113	ep->desc = NULL;
1114
1115	spin_unlock_irqrestore(&udc->lock, flags);
1116	return 0;
1117}
1118
1119/**
1120 * bcm63xx_udc_alloc_request - Allocate a new request.
1121 * @ep: Endpoint associated with the request.
1122 * @mem_flags: Flags to pass to kzalloc().
1123 */
1124static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1125	gfp_t mem_flags)
1126{
1127	struct bcm63xx_req *breq;
1128
1129	breq = kzalloc(sizeof(*breq), mem_flags);
1130	if (!breq)
1131		return NULL;
1132	return &breq->req;
1133}
1134
1135/**
1136 * bcm63xx_udc_free_request - Free a request.
1137 * @ep: Endpoint associated with the request.
1138 * @req: Request to free.
1139 */
1140static void bcm63xx_udc_free_request(struct usb_ep *ep,
1141	struct usb_request *req)
1142{
1143	struct bcm63xx_req *breq = our_req(req);
1144	kfree(breq);
1145}
1146
1147/**
1148 * bcm63xx_udc_queue - Queue up a new request.
1149 * @ep: Endpoint associated with the request.
1150 * @req: Request to add.
1151 * @mem_flags: Unused.
1152 *
1153 * If the queue is empty, start this request immediately.  Otherwise, add
1154 * it to the list.
1155 *
1156 * ep0 replies are sent through this function from the gadget driver, but
1157 * they are treated differently because they need to be handled by the ep0
1158 * state machine.  (Sometimes they are replies to control requests that
1159 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1160 */
1161static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1162	gfp_t mem_flags)
1163{
1164	struct bcm63xx_ep *bep = our_ep(ep);
1165	struct bcm63xx_udc *udc = bep->udc;
1166	struct bcm63xx_req *breq = our_req(req);
1167	unsigned long flags;
1168	int rc = 0;
1169
1170	if (unlikely(!req || !req->complete || !req->buf || !ep))
1171		return -EINVAL;
1172
1173	req->actual = 0;
1174	req->status = 0;
1175	breq->offset = 0;
1176
1177	if (bep == &udc->bep[0]) {
1178		/* only one reply per request, please */
1179		if (udc->ep0_reply)
1180			return -EINVAL;
1181
1182		udc->ep0_reply = req;
1183		schedule_work(&udc->ep0_wq);
1184		return 0;
1185	}
1186
1187	spin_lock_irqsave(&udc->lock, flags);
1188	if (!bep->iudma->enabled) {
1189		rc = -ESHUTDOWN;
1190		goto out;
1191	}
1192
1193	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1194	if (rc == 0) {
1195		list_add_tail(&breq->queue, &bep->queue);
1196		if (list_is_singular(&bep->queue))
1197			iudma_write(udc, bep->iudma, breq);
1198	}
1199
1200out:
1201	spin_unlock_irqrestore(&udc->lock, flags);
1202	return rc;
1203}
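/*
 * Illustrative sketch (not part of the original driver): a typical
 * submit path from a gadget function driver.  example_submit() and its
 * arguments are hypothetical; the calls themselves are the standard
 * gadget API that funnels into bcm63xx_udc_queue() above.
 */
static int __maybe_unused example_submit(struct usb_ep *ep, void *buf,
	unsigned int len,
	void (*done)(struct usb_ep *ep, struct usb_request *req))
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = done;	/* required; bcm63xx_udc_queue() checks it */

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}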
1204
1205/**
1206 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1207 * @ep: Endpoint associated with the request.
1208 * @req: Request to remove.
1209 *
1210 * If the request is not at the head of the queue, this is easy - just nuke
1211 * it.  If the request is at the head of the queue, we'll need to stop the
1212 * DMA transaction and then queue up the successor.
1213 */
1214static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1215{
1216	struct bcm63xx_ep *bep = our_ep(ep);
1217	struct bcm63xx_udc *udc = bep->udc;
1218	struct bcm63xx_req *breq = our_req(req), *cur;
1219	unsigned long flags;
1220	int rc = 0;
1221
1222	spin_lock_irqsave(&udc->lock, flags);
1223	if (list_empty(&bep->queue)) {
1224		rc = -EINVAL;
1225		goto out;
1226	}
1227
1228	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1229	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1230
1231	if (breq == cur) {
1232		iudma_reset_channel(udc, bep->iudma);
1233		list_del(&breq->queue);
1234
1235		if (!list_empty(&bep->queue)) {
1236			struct bcm63xx_req *next;
1237
1238			next = list_first_entry(&bep->queue,
1239				struct bcm63xx_req, queue);
1240			iudma_write(udc, bep->iudma, next);
1241		}
1242	} else {
1243		list_del(&breq->queue);
1244	}
1245
1246out:
1247	spin_unlock_irqrestore(&udc->lock, flags);
1248
1249	req->status = -ESHUTDOWN;
1250	req->complete(ep, req);
1251
1252	return rc;
1253}
1254
1255/**
1256 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1257 * @ep: Endpoint to halt.
1258 * @value: Zero to clear halt; nonzero to set halt.
1259 *
1260 * See comments in bcm63xx_update_wedge().
1261 */
1262static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1263{
1264	struct bcm63xx_ep *bep = our_ep(ep);
1265	struct bcm63xx_udc *udc = bep->udc;
1266	unsigned long flags;
1267
1268	spin_lock_irqsave(&udc->lock, flags);
1269	bcm63xx_set_stall(udc, bep, !!value);
1270	bep->halted = value;
1271	spin_unlock_irqrestore(&udc->lock, flags);
1272
1273	return 0;
1274}
1275
1276/**
1277 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1278 * @ep: Endpoint to wedge.
1279 *
1280 * See comments in bcm63xx_update_wedge().
1281 */
1282static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1283{
1284	struct bcm63xx_ep *bep = our_ep(ep);
1285	struct bcm63xx_udc *udc = bep->udc;
1286	unsigned long flags;
1287
1288	spin_lock_irqsave(&udc->lock, flags);
1289	set_bit(bep->ep_num, &udc->wedgemap);
1290	bcm63xx_set_stall(udc, bep, true);
1291	spin_unlock_irqrestore(&udc->lock, flags);
1292
1293	return 0;
1294}
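/*
 * Illustrative sketch (not part of the original driver): the difference
 * between the two stall flavors from a function driver's point of view.
 * A plain halt is cleared by the host's CLEAR_FEATURE; a wedge persists
 * until the next USB reset (see bcm63xx_update_wedge()).
 * example_stall() is a hypothetical helper.
 */
static void __maybe_unused example_stall(struct usb_ep *ep, bool permanent)
{
	if (permanent)
		usb_ep_set_wedge(ep);	/* -> bcm63xx_udc_set_wedge() */
	else
		usb_ep_set_halt(ep);	/* -> bcm63xx_udc_set_halt() */
}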
1295
1296static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1297	.enable		= bcm63xx_ep_enable,
1298	.disable	= bcm63xx_ep_disable,
1299
1300	.alloc_request	= bcm63xx_udc_alloc_request,
1301	.free_request	= bcm63xx_udc_free_request,
1302
1303	.queue		= bcm63xx_udc_queue,
1304	.dequeue	= bcm63xx_udc_dequeue,
1305
1306	.set_halt	= bcm63xx_udc_set_halt,
1307	.set_wedge	= bcm63xx_udc_set_wedge,
1308};
1309
1310/***********************************************************************
1311 * EP0 handling
1312 ***********************************************************************/
1313
1314/**
1315 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1316 * @udc: Reference to the device controller.
1317 * @ctrl: 8-byte SETUP request.
1318 */
1319static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1320	struct usb_ctrlrequest *ctrl)
1321{
1322	int rc;
1323
1324	spin_unlock_irq(&udc->lock);
1325	rc = udc->driver->setup(&udc->gadget, ctrl);
1326	spin_lock_irq(&udc->lock);
1327	return rc;
1328}
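/*
 * Illustrative sketch (not part of the original driver): the shape of
 * the ->setup hook invoked above.  A real gadget driver decodes the
 * request and either queues an ep0 reply (re-entering this driver via
 * bcm63xx_udc_queue()) or returns a negative value, which makes
 * bcm63xx_ep0_do_setup() stall ep0.  The request handling below is
 * hypothetical.
 */
static int __maybe_unused example_setup(struct usb_gadget *gadget,
	const struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequest) {
	case USB_REQ_SET_FEATURE:
		return 0;		/* handled; no data stage */
	default:
		return -EOPNOTSUPP;	/* driver stalls ep0 */
	}
}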
1329
1330/**
1331 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1332 * @udc: Reference to the device controller.
1333 *
1334 * Many standard requests are handled automatically in the hardware, but
1335 * we still need to pass them to the gadget driver so that it can
1336 * reconfigure the interfaces/endpoints if necessary.
1337 *
1338 * Unfortunately we are not able to send a STALL response if the host
1339 * requests an invalid configuration.  If this happens, we'll have to be
1340 * content with printing a warning.
1341 */
1342static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1343{
1344	struct usb_ctrlrequest ctrl;
1345	int rc;
1346
1347	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1348	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1349	ctrl.wValue = cpu_to_le16(udc->cfg);
1350	ctrl.wIndex = 0;
1351	ctrl.wLength = 0;
1352
1353	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1354	if (rc < 0) {
1355		dev_warn_ratelimited(udc->dev,
1356			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1357			udc->cfg);
1358	}
1359	return rc;
1360}
1361
1362/**
1363 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1364 * @udc: Reference to the device controller.
1365 */
1366static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1367{
1368	struct usb_ctrlrequest ctrl;
1369	int rc;
1370
1371	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1372	ctrl.bRequest = USB_REQ_SET_INTERFACE;
1373	ctrl.wValue = cpu_to_le16(udc->alt_iface);
1374	ctrl.wIndex = cpu_to_le16(udc->iface);
1375	ctrl.wLength = 0;
1376
1377	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1378	if (rc < 0) {
1379		dev_warn_ratelimited(udc->dev,
1380			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1381			udc->iface, udc->alt_iface);
1382	}
1383	return rc;
1384}
1385
1386/**
1387 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1388 * @udc: Reference to the device controller.
1389 * @ch_idx: IUDMA channel number.
1390 * @req: USB gadget layer representation of the request.
1391 */
1392static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1393	struct usb_request *req)
1394{
1395	struct bcm63xx_req *breq = our_req(req);
1396	struct iudma_ch *iudma = &udc->iudma[ch_idx];
1397
1398	BUG_ON(udc->ep0_request);
1399	udc->ep0_request = req;
1400
1401	req->actual = 0;
1402	breq->offset = 0;
1403	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1404	iudma_write(udc, iudma, breq);
1405}
1406
1407/**
1408 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1409 * @udc: Reference to the device controller.
1410 * @req: USB gadget layer representation of the request.
1411 * @status: Status to return to the gadget driver.
1412 */
1413static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1414	struct usb_request *req, int status)
1415{
1416	req->status = status;
1417	if (status)
1418		req->actual = 0;
1419	if (req->complete) {
1420		spin_unlock_irq(&udc->lock);
1421		req->complete(&udc->bep[0].ep, req);
1422		spin_lock_irq(&udc->lock);
1423	}
1424}
1425
1426/**
1427 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1428 *   reset/shutdown.
1429 * @udc: Reference to the device controller.
1430 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1431 */
1432static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1433{
1434	struct usb_request *req = udc->ep0_reply;
1435
1436	udc->ep0_reply = NULL;
1437	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1438	if (udc->ep0_request == req) {
1439		udc->ep0_req_completed = 0;
1440		udc->ep0_request = NULL;
1441	}
1442	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1443}
1444
1445/**
1446 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1447 *   transfer len.
1448 * @udc: Reference to the device controller.
1449 */
1450static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1451{
1452	struct usb_request *req = udc->ep0_request;
1453
1454	udc->ep0_req_completed = 0;
1455	udc->ep0_request = NULL;
1456
1457	return req->actual;
1458}
1459
1460/**
1461 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1462 * @udc: Reference to the device controller.
1463 * @ch_idx: IUDMA channel number.
1464 * @length: Number of bytes to TX/RX.
1465 *
1466 * Used for simple transfers performed by the ep0 worker.  This will always
1467 * use ep0_ctrl_req / ep0_ctrl_buf.
1468 */
1469static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1470	int length)
1471{
1472	struct usb_request *req = &udc->ep0_ctrl_req.req;
1473
1474	req->buf = udc->ep0_ctrl_buf;
1475	req->length = length;
1476	req->complete = NULL;
1477
1478	bcm63xx_ep0_map_write(udc, ch_idx, req);
1479}
1480
1481/**
1482 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1483 * @udc: Reference to the device controller.
1484 *
1485 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
1486 * for the next packet.  Anything else means the transaction requires multiple
1487 * stages of handling.
1488 */
1489static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1490{
1491	int rc;
1492	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1493
1494	rc = bcm63xx_ep0_read_complete(udc);
1495
1496	if (rc < 0) {
1497		dev_err(udc->dev, "missing SETUP packet\n");
1498		return EP0_IDLE;
1499	}
1500
1501	/*
1502	 * Handle a 0-byte IN STATUS acknowledgement.  The hardware does not
1503	 * reliably deliver these, so if we happen to see one, just throw
1504	 * it away.
1505	 */
1506	if (rc == 0)
1507		return EP0_REQUEUE;
1508
1509	/* Drop malformed SETUP packets */
1510	if (rc != sizeof(*ctrl)) {
1511		dev_warn_ratelimited(udc->dev,
1512			"malformed SETUP packet (%d bytes)\n", rc);
1513		return EP0_REQUEUE;
1514	}
1515
1516	/* Process new SETUP packet arriving on ep0 */
1517	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1518	if (rc < 0) {
1519		bcm63xx_set_stall(udc, &udc->bep[0], true);
1520		return EP0_REQUEUE;
1521	}
1522
1523	if (!ctrl->wLength)
1524		return EP0_REQUEUE;
1525	else if (ctrl->bRequestType & USB_DIR_IN)
1526		return EP0_IN_DATA_PHASE_SETUP;
1527	else
1528		return EP0_OUT_DATA_PHASE_SETUP;
1529}
1530
1531/**
1532 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1533 * @udc: Reference to the device controller.
1534 *
1535 * In state EP0_IDLE, the RX descriptor is either pending, or has been
1536 * filled with a SETUP packet from the host.  This function handles new
1537 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1538 * and reset/shutdown events.
1539 *
1540 * Returns 0 if work was done; -EAGAIN if nothing to do.
1541 */
1542static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1543{
1544	if (udc->ep0_req_reset) {
1545		udc->ep0_req_reset = 0;
1546	} else if (udc->ep0_req_set_cfg) {
1547		udc->ep0_req_set_cfg = 0;
1548		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1549			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1550	} else if (udc->ep0_req_set_iface) {
1551		udc->ep0_req_set_iface = 0;
1552		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1553			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1554	} else if (udc->ep0_req_completed) {
1555		udc->ep0state = bcm63xx_ep0_do_setup(udc);
1556		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1557	} else if (udc->ep0_req_shutdown) {
1558		udc->ep0_req_shutdown = 0;
1559		udc->ep0_req_completed = 0;
1560		udc->ep0_request = NULL;
1561		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1562		usb_gadget_unmap_request(&udc->gadget,
1563			&udc->ep0_ctrl_req.req, 0);
1564
1565		/* bcm63xx_udc_pullup() is waiting for this */
1566		mb();
1567		udc->ep0state = EP0_SHUTDOWN;
1568	} else if (udc->ep0_reply) {
1569		/*
1570		 * This could happen if a USB RESET shows up during an ep0
1571		 * transaction (especially if a laggy driver like gadgetfs
1572		 * is in use).
1573		 */
1574		dev_warn(udc->dev, "nuking unexpected reply\n");
1575		bcm63xx_ep0_nuke_reply(udc, 0);
1576	} else {
1577		return -EAGAIN;
1578	}
1579
1580	return 0;
1581}
1582
1583/**
1584 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1585 * @udc: Reference to the device controller.
1586 *
1587 * Returns 0 if work was done; -EAGAIN if nothing to do.
1588 */
1589static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1590{
1591	enum bcm63xx_ep0_state ep0state = udc->ep0state;
1592	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1593
1594	switch (udc->ep0state) {
1595	case EP0_REQUEUE:
1596		/* set up descriptor to receive SETUP packet */
1597		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1598					     BCM63XX_MAX_CTRL_PKT);
1599		ep0state = EP0_IDLE;
1600		break;
1601	case EP0_IDLE:
1602		return bcm63xx_ep0_do_idle(udc);
1603	case EP0_IN_DATA_PHASE_SETUP:
1604		/*
1605		 * Normal case: TX request is in ep0_reply (queued by the
1606		 * callback), or will be queued shortly.  When it's here,
1607		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1608		 *
1609		 * Shutdown case: Stop waiting for the reply.  Just
1610		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
1611		 * queue anything else now.
1612		 */
1613		if (udc->ep0_reply) {
1614			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1615					      udc->ep0_reply);
1616			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1617		} else if (shutdown) {
1618			ep0state = EP0_REQUEUE;
1619		}
1620		break;
1621	case EP0_IN_DATA_PHASE_COMPLETE: {
1622		/*
1623		 * Normal case: TX packet (ep0_reply) is in flight; wait for
1624		 * it to finish, then go back to REQUEUE->IDLE.
1625		 *
1626		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1627		 * completion to the gadget driver, then REQUEUE->IDLE.
1628		 */
1629		if (udc->ep0_req_completed) {
1630			udc->ep0_reply = NULL;
1631			bcm63xx_ep0_read_complete(udc);
1632			/*
1633			 * the "ack" sometimes gets eaten (see
1634			 * bcm63xx_ep0_do_idle)
1635			 */
1636			ep0state = EP0_REQUEUE;
1637		} else if (shutdown) {
1638			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1639			bcm63xx_ep0_nuke_reply(udc, 1);
1640			ep0state = EP0_REQUEUE;
1641		}
1642		break;
1643	}
1644	case EP0_OUT_DATA_PHASE_SETUP:
1645		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1646		if (udc->ep0_reply) {
1647			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1648					      udc->ep0_reply);
1649			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1650		} else if (shutdown) {
1651			ep0state = EP0_REQUEUE;
1652		}
1653		break;
1654	case EP0_OUT_DATA_PHASE_COMPLETE: {
1655		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1656		if (udc->ep0_req_completed) {
1657			udc->ep0_reply = NULL;
1658			bcm63xx_ep0_read_complete(udc);
1659
1660			/* send 0-byte ack to host */
1661			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1662			ep0state = EP0_OUT_STATUS_PHASE;
1663		} else if (shutdown) {
1664			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1665			bcm63xx_ep0_nuke_reply(udc, 0);
1666			ep0state = EP0_REQUEUE;
1667		}
1668		break;
1669	}
1670	case EP0_OUT_STATUS_PHASE:
1671		/*
1672		 * Normal case: 0-byte OUT ack packet is in flight; wait
1673		 * for it to finish, then go back to REQUEUE->IDLE.
1674		 *
1675		 * Shutdown case: just cancel the transmission.  Don't bother
1676		 * calling the completion, because it originated from this
1677		 * function anyway.  Then go back to REQUEUE->IDLE.
1678		 */
1679		if (udc->ep0_req_completed) {
1680			bcm63xx_ep0_read_complete(udc);
1681			ep0state = EP0_REQUEUE;
1682		} else if (shutdown) {
1683			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1684			udc->ep0_request = NULL;
1685			ep0state = EP0_REQUEUE;
1686		}
1687		break;
1688	case EP0_IN_FAKE_STATUS_PHASE: {
1689		/*
1690		 * Normal case: we spoofed a SETUP packet and are now
1691		 * waiting for the gadget driver to send a 0-byte reply.
1692		 * This doesn't actually get sent to the HW because the
1693		 * HW has already sent its own reply.  Once we get the
1694		 * response, return to IDLE.
1695		 *
1696		 * Shutdown case: return to IDLE immediately.
1697		 *
1698		 * Note that the ep0 RX descriptor has remained queued
1699		 * (and possibly unfilled) during this entire transaction.
1700		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1701		 * or SET_INTERFACE transactions.
1702		 */
1703		struct usb_request *r = udc->ep0_reply;
1704
1705		if (!r) {
1706			if (shutdown)
1707				ep0state = EP0_IDLE;
1708			break;
1709		}
1710
1711		bcm63xx_ep0_complete(udc, r, 0);
1712		udc->ep0_reply = NULL;
1713		ep0state = EP0_IDLE;
1714		break;
1715	}
1716	case EP0_SHUTDOWN:
1717		break;
1718	}
1719
1720	if (udc->ep0state == ep0state)
1721		return -EAGAIN;
1722
1723	udc->ep0state = ep0state;
1724	return 0;
1725}
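/*
 * For reference, the normal (non-shutdown) walks through the state
 * machine above are:
 *
 *   control read (IN data):   EP0_REQUEUE -> EP0_IDLE ->
 *                             EP0_IN_DATA_PHASE_SETUP ->
 *                             EP0_IN_DATA_PHASE_COMPLETE -> EP0_REQUEUE
 *   control write (OUT data): EP0_REQUEUE -> EP0_IDLE ->
 *                             EP0_OUT_DATA_PHASE_SETUP ->
 *                             EP0_OUT_DATA_PHASE_COMPLETE ->
 *                             EP0_OUT_STATUS_PHASE -> EP0_REQUEUE
 *   spoofed SET_CONFIGURATION / SET_INTERFACE:
 *                             EP0_IDLE -> EP0_IN_FAKE_STATUS_PHASE ->
 *                             EP0_IDLE
 *
 * A zero-length control transfer goes from EP0_IDLE straight back to
 * EP0_REQUEUE.
 */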
1726
1727/**
1728 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1729 * @w: Workqueue struct.
1730 *
1731 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
1732 * is used to synchronize ep0 events and ensure that both HW and SW events
1733 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
1734 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1735 * by the USBD hardware.
1736 *
1737 * The worker function will continue iterating around the state machine
1738 * until there is nothing left to do.  Usually "nothing left to do" means
1739 * that we're waiting for a new event from the hardware.
1740 */
1741static void bcm63xx_ep0_process(struct work_struct *w)
1742{
1743	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1744	spin_lock_irq(&udc->lock);
1745	while (bcm63xx_ep0_one_round(udc) == 0)
1746		;
1747	spin_unlock_irq(&udc->lock);
1748}
1749
1750/***********************************************************************
1751 * Standard UDC gadget operations
1752 ***********************************************************************/
1753
1754/**
1755 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1756 * @gadget: USB slave device.
1757 */
1758static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1759{
1760	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1761
1762	return (usbd_readl(udc, USBD_STATUS_REG) &
1763		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1764}
1765
1766/**
1767 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1768 * @gadget: USB slave device.
1769 * @is_on: 0 to disable pullup, 1 to enable.
1770 *
1771 * See notes in bcm63xx_select_pullup().
1772 */
1773static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1774{
1775	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1776	unsigned long flags;
1777	int i, rc = -EINVAL;
1778
1779	spin_lock_irqsave(&udc->lock, flags);
1780	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1781		udc->gadget.speed = USB_SPEED_UNKNOWN;
1782		udc->ep0state = EP0_REQUEUE;
1783		bcm63xx_fifo_setup(udc);
1784		bcm63xx_fifo_reset(udc);
1785		bcm63xx_ep_setup(udc);
1786
1787		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1788		for (i = 0; i < BCM63XX_NUM_EP; i++)
1789			bcm63xx_set_stall(udc, &udc->bep[i], false);
1790
1791		bcm63xx_set_ctrl_irqs(udc, true);
1792		bcm63xx_select_pullup(udc, true);
1793		rc = 0;
1794	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1795		bcm63xx_select_pullup(udc, false);
1796
1797		udc->ep0_req_shutdown = 1;
1798		spin_unlock_irqrestore(&udc->lock, flags);
1799
1800		while (1) {
1801			schedule_work(&udc->ep0_wq);
1802			if (udc->ep0state == EP0_SHUTDOWN)
1803				break;
1804			msleep(50);
1805		}
1806		bcm63xx_set_ctrl_irqs(udc, false);
1807		cancel_work_sync(&udc->ep0_wq);
1808		return 0;
1809	}
1810
1811	spin_unlock_irqrestore(&udc->lock, flags);
1812	return rc;
1813}
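/*
 * Illustrative sketch (not part of the original driver): the pullup
 * hook is normally reached through the gadget core helpers rather than
 * called directly.  example_soft_connect() is a hypothetical caller.
 */
static void __maybe_unused example_soft_connect(struct usb_gadget *gadget,
	bool connect)
{
	if (connect)
		usb_gadget_connect(gadget);	/* -> bcm63xx_udc_pullup(.., 1) */
	else
		usb_gadget_disconnect(gadget);	/* -> bcm63xx_udc_pullup(.., 0) */
}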
1814
1815/**
1816 * bcm63xx_udc_start - Start the controller.
1817 * @gadget: USB slave device.
1818 * @driver: Driver for USB slave devices.
1819 */
1820static int bcm63xx_udc_start(struct usb_gadget *gadget,
1821		struct usb_gadget_driver *driver)
1822{
1823	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1824	unsigned long flags;
1825
1826	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1827	    !driver->setup)
1828		return -EINVAL;
1829	if (!udc)
1830		return -ENODEV;
1831	if (udc->driver)
1832		return -EBUSY;
1833
1834	spin_lock_irqsave(&udc->lock, flags);
1835
1836	set_clocks(udc, true);
1837	bcm63xx_fifo_setup(udc);
1838	bcm63xx_ep_init(udc);
1839	bcm63xx_ep_setup(udc);
1840	bcm63xx_fifo_reset(udc);
1841	bcm63xx_select_phy_mode(udc, true);
1842
1843	udc->driver = driver;
1844	driver->driver.bus = NULL;
1845	udc->gadget.dev.of_node = udc->dev->of_node;
1846
1847	spin_unlock_irqrestore(&udc->lock, flags);
1848
1849	return 0;
1850}
1851
1852/**
1853 * bcm63xx_udc_stop - Shut down the controller.
1854 * @gadget: USB slave device.
1855 *
1856 */
1857static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1858{
1859	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1860	unsigned long flags;
1861
1862	spin_lock_irqsave(&udc->lock, flags);
1863
1864	udc->driver = NULL;
1865
1866	/*
1867	 * If we switch the PHY too abruptly after dropping D+, the host
1868	 * will often complain:
1869	 *
1870	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1871	 */
1872	msleep(100);
1873
1874	bcm63xx_select_phy_mode(udc, false);
1875	set_clocks(udc, false);
1876
1877	spin_unlock_irqrestore(&udc->lock, flags);
1878
1879	return 0;
1880}
1881
1882static const struct usb_gadget_ops bcm63xx_udc_ops = {
1883	.get_frame	= bcm63xx_udc_get_frame,
1884	.pullup		= bcm63xx_udc_pullup,
1885	.udc_start	= bcm63xx_udc_start,
1886	.udc_stop	= bcm63xx_udc_stop,
1887};
1888
1889/***********************************************************************
1890 * IRQ handling
1891 ***********************************************************************/
1892
1893/**
1894 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1895 * @udc: Reference to the device controller.
1896 *
1897 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1898 * The driver never sees the raw control packets coming in on the ep0
1899 * IUDMA channel, but at least we get an interrupt event to tell us that
1900 * new values are waiting in the USBD_STATUS register.
1901 */
1902static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1903{
1904	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1905
1906	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1907	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1908	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1909			 USBD_STATUS_ALTINTF_SHIFT;
1910	bcm63xx_ep_setup(udc);
1911}
1912
1913/**
1914 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1915 * @udc: Reference to the device controller.
1916 *
1917 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
1918 * speed has changed, so that the caller can update the endpoint settings.
1919 */
1920static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1921{
1922	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1923	enum usb_device_speed oldspeed = udc->gadget.speed;
1924
1925	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1926	case BCM63XX_SPD_HIGH:
1927		udc->gadget.speed = USB_SPEED_HIGH;
1928		break;
1929	case BCM63XX_SPD_FULL:
1930		udc->gadget.speed = USB_SPEED_FULL;
1931		break;
1932	default:
1933		/* this should never happen */
1934		udc->gadget.speed = USB_SPEED_UNKNOWN;
1935		dev_err(udc->dev,
1936			"received SETUP packet with invalid link speed\n");
1937		return 0;
1938	}
1939
1940	if (udc->gadget.speed != oldspeed) {
1941		dev_info(udc->dev, "link up, %s-speed mode\n",
1942			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1943		return 1;
1944	} else {
1945		return 0;
1946	}
1947}
1948
1949/**
1950 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1951 * @udc: Reference to the device controller.
1952 * @new_status: true to "refresh" wedge status; false to clear it.
1953 *
1954 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1955 * because the controller hardware is designed to automatically clear
1956 * stalls in response to a CLEAR_FEATURE request from the host.
1957 *
1958 * On a RESET interrupt, we do want to restore all wedged endpoints.
1959 */
1960static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1961{
1962	int i;
1963
1964	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1965		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1966		if (!new_status)
1967			clear_bit(i, &udc->wedgemap);
1968	}
1969}
1970
1971/**
1972 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1973 * @irq: IRQ number (unused).
1974 * @dev_id: Reference to the device controller.
1975 *
1976 * This is where we handle link (VBUS) down, USB reset, speed changes,
1977 * SET_CONFIGURATION, and SET_INTERFACE events.
1978 */
1979static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1980{
1981	struct bcm63xx_udc *udc = dev_id;
1982	u32 stat;
1983	bool disconnected = false, bus_reset = false;
1984
1985	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1986	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1987
1988	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1989
1990	spin_lock(&udc->lock);
1991	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1992		/* VBUS toggled */
1993
1994		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1995		      USBD_EVENTS_USB_LINK_MASK) &&
1996		      udc->gadget.speed != USB_SPEED_UNKNOWN)
1997			dev_info(udc->dev, "link down\n");
1998
1999		udc->gadget.speed = USB_SPEED_UNKNOWN;
2000		disconnected = true;
2001	}
2002	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
2003		bcm63xx_fifo_setup(udc);
2004		bcm63xx_fifo_reset(udc);
2005		bcm63xx_ep_setup(udc);
2006
2007		bcm63xx_update_wedge(udc, false);
2008
2009		udc->ep0_req_reset = 1;
2010		schedule_work(&udc->ep0_wq);
2011		bus_reset = true;
2012	}
2013	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2014		if (bcm63xx_update_link_speed(udc)) {
2015			bcm63xx_fifo_setup(udc);
2016			bcm63xx_ep_setup(udc);
2017		}
2018		bcm63xx_update_wedge(udc, true);
2019	}
2020	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2021		bcm63xx_update_cfg_iface(udc);
2022		udc->ep0_req_set_cfg = 1;
2023		schedule_work(&udc->ep0_wq);
2024	}
2025	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2026		bcm63xx_update_cfg_iface(udc);
2027		udc->ep0_req_set_iface = 1;
2028		schedule_work(&udc->ep0_wq);
2029	}
2030	spin_unlock(&udc->lock);
2031
2032	if (disconnected && udc->driver)
2033		udc->driver->disconnect(&udc->gadget);
2034	else if (bus_reset && udc->driver)
2035		usb_gadget_udc_reset(&udc->gadget, udc->driver);
2036
2037	return IRQ_HANDLED;
2038}
2039
2040/**
2041 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2042 * @irq: IRQ number (unused).
2043 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2044 *
2045 * For the two ep0 channels, we have special handling that triggers the
2046 * ep0 worker thread.  For normal bulk/intr channels, either queue up
2047 * the next buffer descriptor for the transaction (incomplete transaction),
2048 * or invoke the completion callback (complete transactions).
2049 */
2050static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2051{
2052	struct iudma_ch *iudma = dev_id;
2053	struct bcm63xx_udc *udc = iudma->udc;
2054	struct bcm63xx_ep *bep;
2055	struct usb_request *req = NULL;
2056	struct bcm63xx_req *breq = NULL;
2057	int rc;
2058	bool is_done = false;
2059
2060	spin_lock(&udc->lock);
2061
2062	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2063			ENETDMAC_IR_REG, iudma->ch_idx);
2064	bep = iudma->bep;
2065	rc = iudma_read(udc, iudma);
2066
2067	/* special handling for EP0 RX (0) and TX (1) */
2068	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2069	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2070		req = udc->ep0_request;
2071		breq = our_req(req);
2072
2073		/* a single request could require multiple submissions */
2074		if (rc >= 0) {
2075			req->actual += rc;
2076
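			/* done: full request length reached, or last BD returned short */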
2077			if (req->actual >= req->length || breq->bd_bytes > rc) {
2078				udc->ep0_req_completed = 1;
2079				is_done = true;
2080				schedule_work(&udc->ep0_wq);
2081
2082				/* "actual" on a ZLP is 1 byte */
2083				req->actual = min(req->actual, req->length);
2084			} else {
2085				/* queue up the next BD (same request) */
2086				iudma_write(udc, iudma, breq);
2087			}
2088		}
2089	} else if (!list_empty(&bep->queue)) {
2090		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2091		req = &breq->req;
2092
2093		if (rc >= 0) {
2094			req->actual += rc;
2095
2096			if (req->actual >= req->length || breq->bd_bytes > rc) {
2097				is_done = true;
2098				list_del(&breq->queue);
2099
2100				req->actual = min(req->actual, req->length);
2101
2102				if (!list_empty(&bep->queue)) {
2103					struct bcm63xx_req *next;
2104
2105					next = list_first_entry(&bep->queue,
2106						struct bcm63xx_req, queue);
2107					iudma_write(udc, iudma, next);
2108				}
2109			} else {
2110				iudma_write(udc, iudma, breq);
2111			}
2112		}
2113	}
2114	spin_unlock(&udc->lock);
2115
2116	if (is_done) {
2117		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2118		if (req->complete)
2119			req->complete(&bep->ep, req);
2120	}
2121
2122	return IRQ_HANDLED;
2123}
2124
2125/***********************************************************************
2126 * Debug filesystem
2127 ***********************************************************************/
2128
2129/*
2130 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2131 * @s: seq_file to which the information will be written.
2132 * @p: Unused.
2133 *
2134 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2135 */
2136static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2137{
2138	struct bcm63xx_udc *udc = s->private;
2139
2140	if (!udc->driver)
2141		return -ENODEV;
2142
2143	seq_printf(s, "ep0 state: %s\n",
2144		   bcm63xx_ep0_state_names[udc->ep0state]);
2145	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
2146		   udc->ep0_req_reset ? "reset " : "",
2147		   udc->ep0_req_set_cfg ? "set_cfg " : "",
2148		   udc->ep0_req_set_iface ? "set_iface " : "",
2149		   udc->ep0_req_shutdown ? "shutdown " : "",
2150		   udc->ep0_request ? "pending " : "",
2151		   udc->ep0_req_completed ? "completed " : "",
2152		   udc->ep0_reply ? "reply " : "");
2153	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2154		   udc->cfg, udc->iface, udc->alt_iface);
2155	seq_printf(s, "regs:\n");
2156	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
2157		   usbd_readl(udc, USBD_CONTROL_REG),
2158		   usbd_readl(udc, USBD_STRAPS_REG),
2159		   usbd_readl(udc, USBD_STATUS_REG));
2160	seq_printf(s, "  events:  %08x; stall:  %08x\n",
2161		   usbd_readl(udc, USBD_EVENTS_REG),
2162		   usbd_readl(udc, USBD_STALL_REG));
2163
2164	return 0;
2165}
2166
2167/*
2168 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2169 * @s: seq_file to which the information will be written.
2170 * @p: Unused.
2171 *
2172 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2173 */
2174static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2175{
2176	struct bcm63xx_udc *udc = s->private;
2177	int ch_idx, i;
2178	u32 sram2, sram3;
2179
2180	if (!udc->driver)
2181		return -ENODEV;
2182
2183	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2184		struct iudma_ch *iudma = &udc->iudma[ch_idx];
2185		struct list_head *pos;
2186
2187		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2188		switch (iudma_defaults[ch_idx].ep_type) {
2189		case BCMEP_CTRL:
2190			seq_printf(s, "control");
2191			break;
2192		case BCMEP_BULK:
2193			seq_printf(s, "bulk");
2194			break;
2195		case BCMEP_INTR:
2196			seq_printf(s, "interrupt");
2197			break;
2198		}
2199		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2200		seq_printf(s, " [ep%d]:\n",
2201			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2202		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2203			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2204			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2205			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2206			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2207
2208		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2209		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2210		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2211			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2212			   sram2 >> 16, sram2 & 0xffff,
2213			   sram3 >> 16, sram3 & 0xffff,
2214			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2215		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
2216			   iudma->n_bds);
2217
2218		if (iudma->bep) {
2219			i = 0;
2220			list_for_each(pos, &iudma->bep->queue)
2221				i++;
2222			seq_printf(s, "; %d queued\n", i);
2223		} else {
2224			seq_printf(s, "\n");
2225		}
2226
2227		for (i = 0; i < iudma->n_bds; i++) {
2228			struct bcm_enet_desc *d = &iudma->bd_ring[i];
2229
2230			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
2231				   i * sizeof(*d), i,
2232				   d->len_stat >> 16, d->len_stat & 0xffff,
2233				   d->address);
2234			if (d == iudma->read_bd)
2235				seq_printf(s, "   <<RD");
2236			if (d == iudma->write_bd)
2237				seq_printf(s, "   <<WR");
2238			seq_printf(s, "\n");
2239		}
2240
2241		seq_printf(s, "\n");
2242	}
2243
2244	return 0;
2245}
2246
2247static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2248{
2249	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
2250}
2251
2252static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2253{
2254	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
2255}
2256
2257static const struct file_operations usbd_dbg_fops = {
2258	.owner		= THIS_MODULE,
2259	.open		= bcm63xx_usbd_dbg_open,
2260	.llseek		= seq_lseek,
2261	.read		= seq_read,
2262	.release	= single_release,
2263};
2264
2265static const struct file_operations iudma_dbg_fops = {
2266	.owner		= THIS_MODULE,
2267	.open		= bcm63xx_iudma_dbg_open,
2268	.llseek		= seq_lseek,
2269	.read		= seq_read,
2270	.release	= single_release,
2271};
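/*
 * Aside: the open/fops boilerplate above matches what the
 * DEFINE_SHOW_ATTRIBUTE() helper from <linux/seq_file.h> expands to.
 * Given the _show naming used here, an equivalent (which would rename
 * the fops to bcm63xx_usbd_dbg_fops / bcm63xx_iudma_dbg_fops) is:
 *
 *	DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
 *	DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
 */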
2272
2273
2274/**
2275 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2276 * @udc: Reference to the device controller.
2277 */
2278static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2279{
2280	struct dentry *root, *usbd, *iudma;
2281
2282	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2283		return;
2284
2285	root = debugfs_create_dir(udc->gadget.name, NULL);
2286	if (IS_ERR(root) || !root)
2287		goto err_root;
2288
2289	usbd = debugfs_create_file("usbd", 0400, root, udc,
2290			&usbd_dbg_fops);
2291	if (!usbd)
2292		goto err_usbd;
2293	iudma = debugfs_create_file("iudma", 0400, root, udc,
2294			&iudma_dbg_fops);
2295	if (!iudma)
2296		goto err_iudma;
2297
2298	udc->debugfs_root = root;
2299	udc->debugfs_usbd = usbd;
2300	udc->debugfs_iudma = iudma;
2301	return;
2302err_iudma:
2303	debugfs_remove(usbd);
2304err_usbd:
2305	debugfs_remove(root);
2306err_root:
2307	dev_err(udc->dev, "debugfs is not available\n");
2308}
2309
2310/**
2311 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2312 * @udc: Reference to the device controller.
2313 *
2314 * debugfs_remove() is safe to call with a NULL argument.
2315 */
2316static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2317{
2318	debugfs_remove(udc->debugfs_iudma);
2319	debugfs_remove(udc->debugfs_usbd);
2320	debugfs_remove(udc->debugfs_root);
2321	udc->debugfs_iudma = NULL;
2322	udc->debugfs_usbd = NULL;
2323	udc->debugfs_root = NULL;
2324}
2325
2326/***********************************************************************
2327 * Driver init/exit
2328 ***********************************************************************/
2329
2330/**
2331 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2332 * @pdev: Platform device struct from the bcm63xx BSP code.
2333 *
2334 * Note that platform data is required, because pd.port_no varies from chip
2335 * to chip and is used to switch the correct USB port to device mode.
2336 */
2337static int bcm63xx_udc_probe(struct platform_device *pdev)
2338{
2339	struct device *dev = &pdev->dev;
2340	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2341	struct bcm63xx_udc *udc;
2342	struct resource *res;
2343	int rc = -ENOMEM, i, irq;
2344
2345	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2346	if (!udc)
2347		return -ENOMEM;
2348
2349	platform_set_drvdata(pdev, udc);
2350	udc->dev = dev;
2351	udc->pd = pd;
2352
2353	if (!pd) {
2354		dev_err(dev, "missing platform data\n");
2355		return -EINVAL;
2356	}
2357
2358	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2359	udc->usbd_regs = devm_ioremap_resource(dev, res);
2360	if (IS_ERR(udc->usbd_regs))
2361		return PTR_ERR(udc->usbd_regs);
2362
2363	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2364	udc->iudma_regs = devm_ioremap_resource(dev, res);
2365	if (IS_ERR(udc->iudma_regs))
2366		return PTR_ERR(udc->iudma_regs);
2367
2368	spin_lock_init(&udc->lock);
2369	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2370
2371	udc->gadget.ops = &bcm63xx_udc_ops;
2372	udc->gadget.name = dev_name(dev);
2373
2374	if (!pd->use_fullspeed && !use_fullspeed)
2375		udc->gadget.max_speed = USB_SPEED_HIGH;
2376	else
2377		udc->gadget.max_speed = USB_SPEED_FULL;
2378
2379	/* request clocks, allocate buffers, and clear any pending IRQs */
2380	rc = bcm63xx_init_udc_hw(udc);
2381	if (rc)
2382		return rc;
2383
2384	rc = -ENXIO;
2385
2386	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2387	irq = platform_get_irq(pdev, 0);
2388	if (irq < 0) {
2389		dev_err(dev, "missing IRQ resource #0\n");
2390		goto out_uninit;
2391	}
2392	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2393			     dev_name(dev), udc) < 0) {
2394		dev_err(dev, "error requesting IRQ #%d\n", irq);
2395		goto out_uninit;
2396	}
2397
2398	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2399	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2400		irq = platform_get_irq(pdev, i + 1);
2401		if (irq < 0) {
2402			dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2403			goto out_uninit;
2404		}
2405		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2406				     dev_name(dev), &udc->iudma[i]) < 0) {
2407			dev_err(dev, "error requesting IRQ #%d\n", irq);
2408			goto out_uninit;
2409		}
2410	}
2411
2412	bcm63xx_udc_init_debugfs(udc);
2413	rc = usb_add_gadget_udc(dev, &udc->gadget);
2414	if (!rc)
2415		return 0;
2416
2417	bcm63xx_udc_cleanup_debugfs(udc);
2418out_uninit:
2419	bcm63xx_uninit_udc_hw(udc);
2420	return rc;
2421}
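/*
 * Illustrative sketch (not part of the original driver): the shape of
 * the platform data this probe expects from the BSP.  Field values are
 * placeholders; only .port_no and .use_fullspeed are referenced by the
 * driver (see the probe code and kernel-doc above).
 */
static struct bcm63xx_usbd_platform_data __maybe_unused example_pd = {
	.port_no	= 0,	/* which USB port to switch into device mode */
	.use_fullspeed	= 0,	/* 0: allow high speed; nonzero: full speed only */
};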
2422
2423/**
2424 * bcm63xx_udc_remove - Remove the device from the system.
2425 * @pdev: Platform device struct from the bcm63xx BSP code.
2426 */
2427static int bcm63xx_udc_remove(struct platform_device *pdev)
2428{
2429	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2430
2431	bcm63xx_udc_cleanup_debugfs(udc);
2432	usb_del_gadget_udc(&udc->gadget);
2433	BUG_ON(udc->driver);
2434
2435	bcm63xx_uninit_udc_hw(udc);
2436
2437	return 0;
2438}
2439
2440static struct platform_driver bcm63xx_udc_driver = {
2441	.probe		= bcm63xx_udc_probe,
2442	.remove		= bcm63xx_udc_remove,
2443	.driver		= {
2444		.name	= DRV_MODULE_NAME,
2445	},
2446};
2447module_platform_driver(bcm63xx_udc_driver);
2448
2449MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2450MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2451MODULE_LICENSE("GPL");
2452MODULE_ALIAS("platform:" DRV_MODULE_NAME);