// SPDX-License-Identifier: GPL-2.0+
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
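
/*
 * Illustrative numbers, assuming a high-speed bulk OUT endpoint
 * (max_pkt_hs = 512): a 2048-byte transfer raises four BUFDONE IRQs
 * with irq_coalesce == false (one per 512-byte DATAx packet), but only
 * one with irq_coalesce == true, since the whole transfer then fits in
 * a single BD of up to IUDMA_MAX_FRAGMENT (2048) bytes.  See
 * iudma_write() for the BD sizing logic.
 */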

#define BCM63XX_NUM_EP		5
#define BCM63XX_NUM_IUDMA	6
#define BCM63XX_NUM_FIFO_PAIRS	3

#define IUDMA_RESET_TIMEOUT_US	10000

#define IUDMA_EP0_RXCHAN	0
#define IUDMA_EP0_TXCHAN	1

#define IUDMA_MAX_FRAGMENT	2048
#define BCM63XX_MAX_CTRL_PKT	64

#define BCMEP_CTRL		0x00
#define BCMEP_ISOC		0x01
#define BCMEP_BULK		0x02
#define BCMEP_INTR		0x03

#define BCMEP_OUT		0x00
#define BCMEP_IN		0x01

#define BCM63XX_SPD_FULL	1
#define BCM63XX_SPD_HIGH	0

#define IUDMA_DMAC_OFFSET	0x200
#define IUDMA_DMAS_OFFSET	0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};
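
/*
 * State-flow sketch (derived from bcm63xx_ep0_one_round() below): an IN
 * control transfer walks REQUEUE -> IDLE -> IN_DATA_PHASE_SETUP ->
 * IN_DATA_PHASE_COMPLETE -> REQUEUE; an OUT transfer takes the OUT data
 * phase states and then OUT_STATUS_PHASE for the 0-byte ack.
 * SET_CONFIGURATION/SET_INTERFACE requests consumed by the hardware
 * detour through IN_FAKE_STATUS_PHASE without touching the datapath.
 */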

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int ep_num;
	int n_bds;
	int ep_type;
	int dir;
	int n_fifo_slots;
	int max_pkt_hs;
	int max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num       ep_type            n_fifo_slots    max_pkt_fs
	   idx |  n_bds     |          dir    |  max_pkt_hs   |
	    |  |    |       |           |     |      |        |       */
	[0] = { -1,  4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[1] = {  0,  4, BCMEP_CTRL, BCMEP_OUT,  32,    64,     64 },
	[2] = {  2, 16, BCMEP_BULK, BCMEP_OUT, 128,   512,     64 },
	[3] = {  1, 16, BCMEP_BULK, BCMEP_IN,  128,   512,     64 },
	[4] = {  4,  4, BCMEP_INTR, BCMEP_OUT,  32,    64,     64 },
	[5] = {  3,  4, BCMEP_INTR, BCMEP_IN,   32,    64,     64 },
};
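
/*
 * Layout note (derived from iudma_init_channel() and
 * bcm63xx_fifo_setup()): even channel indices are RX (OUT) and odd
 * indices are TX (IN), so channels 2n and 2n+1 form pair n, share FIFO
 * pair n, and are selected by writing n to init_sel -- see
 * bcm63xx_ep_dma_select().
 */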

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int ch_idx;
	int ep_num;
	bool enabled;
	int max_pkt;
	bool is_tx;
	struct bcm63xx_ep *bep;
	struct bcm63xx_udc *udc;

	struct bcm_enet_desc *read_bd;
	struct bcm_enet_desc *write_bd;
	struct bcm_enet_desc *end_bd;
	int n_bds_used;

	struct bcm_enet_desc *bd_ring;
	dma_addr_t bd_ring_dma;
	unsigned int n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int ep_num;
	struct iudma_ch *iudma;
	struct usb_ep ep;
	struct bcm63xx_udc *udc;
	struct list_head queue;
	unsigned halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head queue;		/* ep's requests */
	struct usb_request req;
	unsigned int offset;
	unsigned int bd_bytes;
	struct iudma_ch *iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 */
struct bcm63xx_udc {
	spinlock_t lock;

	struct device *dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk *usbd_clk;
	struct clk *usbh_clk;

	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;

	void __iomem *usbd_regs;
	void __iomem *iudma_regs;

	struct bcm63xx_ep bep[BCM63XX_NUM_EP];
	struct iudma_ch iudma[BCM63XX_NUM_IUDMA];

	int cfg;
	int iface;
	int alt_iface;

	struct bcm63xx_req ep0_ctrl_req;
	u8 *ep0_ctrl_buf;

	int ep0state;
	struct work_struct ep0_wq;

	unsigned long wedgemap;

	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;
	struct usb_request *ep0_reply;
	struct usb_request *ep0_request;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			 (ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
				   int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
		   (ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			 (ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
				   int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
		   (ENETDMA_CHAN_WIDTH * chan));
}
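
/*
 * Address-map example (illustrative): usb_dmac_readl(udc, off, 2)
 * reads iudma_regs + IUDMA_DMAC_OFFSET (0x200) + off +
 * 2 * ENETDMA_CHAN_WIDTH, i.e. the per-channel DMAC register "off" for
 * channel 2; the usb_dmas_*() accessors do the same arithmetic in the
 * state-RAM window at IUDMA_DMAS_OFFSET (0x400).
 */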

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
			      bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
	      (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
	      (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
		      ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
		       USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
		      ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
		       USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
		      ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
			struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
				 iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
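
/*
 * Worked example for iudma_write() (illustrative): a 1024-byte TX
 * request with req.zero set, at max_pkt 512, queues one 1024-byte BD
 * (SOP) followed by a zero-length BD flagged DMADESC_USB_ZERO_MASK
 * (EOP), because 1024 is a multiple of max_pkt.  Without req.zero the
 * same request fits in a single SOP|EOP BD, since TX fragments may be
 * up to IUDMA_MAX_FRAGMENT (2048) bytes.
 */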

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.  Returns -EINVAL if no BDs are
 * outstanding on the channel.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
	       ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}
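
/*
 * Note (illustrative): with BCM63XX_NUM_IUDMA == 6, BIT(6) - 1 == 0x3f,
 * so the ENETDMA_GLB_IRQMASK_REG write in iudma_init() unmasks the
 * global interrupt bits for channels 0-5 in one shot.
 */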

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_info[i].name;
		bep->ep.caps = bcm63xx_ep_info[i].caps;
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
			     const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
						     gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
				     struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}

/**
 * bcm63xx_udc_queue - Queue up a new request.
 * @ep: Endpoint associated with the request.
 * @req: Request to add.
 * @mem_flags: Unused.
 *
 * If the queue is empty, start this request immediately.  Otherwise, add
 * it to the list.
 *
 * ep0 replies are sent through this function from the gadget driver, but
 * they are treated differently because they need to be handled by the ep0
 * state machine.  (Sometimes they are replies to control requests that
 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
 */
static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
			     gfp_t mem_flags)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req);
	unsigned long flags;
	int rc = 0;

	if (unlikely(!req || !req->complete || !req->buf || !ep))
		return -EINVAL;

	req->actual = 0;
	req->status = 0;
	breq->offset = 0;

	if (bep == &udc->bep[0]) {
		/* only one reply per request, please */
		if (udc->ep0_reply)
			return -EINVAL;

		udc->ep0_reply = req;
		schedule_work(&udc->ep0_wq);
		return 0;
	}

	spin_lock_irqsave(&udc->lock, flags);
	if (!bep->iudma->enabled) {
		rc = -ESHUTDOWN;
		goto out;
	}

	rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
	if (rc == 0) {
		list_add_tail(&breq->queue, &bep->queue);
		if (list_is_singular(&bep->queue))
			iudma_write(udc, bep->iudma, breq);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
 * @ep: Endpoint associated with the request.
 * @req: Request to remove.
 *
 * If the request is not at the head of the queue, this is easy - just nuke
 * it.  If the request is at the head of the queue, we'll need to stop the
 * DMA transaction and then queue up the successor.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}

/**
 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
 * @ep: Endpoint to halt.
 * @value: Zero to clear halt; nonzero to set halt.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	bcm63xx_set_stall(udc, bep, !!value);
	bep->halted = value;
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
 * @ep: Endpoint to wedge.
 *
 * See comments in bcm63xx_update_wedge().
 */
static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);
	set_bit(bep->ep_num, &udc->wedgemap);
	bcm63xx_set_stall(udc, bep, true);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable		= bcm63xx_ep_enable,
	.disable	= bcm63xx_ep_disable,

	.alloc_request	= bcm63xx_udc_alloc_request,
	.free_request	= bcm63xx_udc_free_request,

	.queue		= bcm63xx_udc_queue,
	.dequeue	= bcm63xx_udc_dequeue,

	.set_halt	= bcm63xx_udc_set_halt,
	.set_wedge	= bcm63xx_udc_set_wedge,
};
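
/*
 * Usage sketch (hypothetical gadget-driver side, not part of this
 * driver): a function driver reaches these ops through the gadget core
 * helpers, roughly as follows:
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	usb_ep_enable(ep);			// -> bcm63xx_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;		// hypothetical callback
 *	usb_ep_queue(ep, req, GFP_ATOMIC);	// -> bcm63xx_udc_queue()
 */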

/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
				      struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration.  If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
				  struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
				 struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker.  This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
					 int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen.  EP0_REQUEUE means we're ready
 * for the next packet.  Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
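
/*
 * Decode example (illustrative): a GET_DESCRIPTOR SETUP has wLength != 0
 * and USB_DIR_IN set, so bcm63xx_ep0_do_setup() returns
 * EP0_IN_DATA_PHASE_SETUP and the worker waits for the gadget driver's
 * reply; a SET_FEATURE SETUP has wLength == 0 and simply requeues the
 * RX descriptor via EP0_REQUEUE.
 */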

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host.  This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly.  When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply.  Just
		 * REQUEUE->IDLE.  The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission.  Don't bother
		 * calling the completion, because it originated from this
		 * function anyway.  Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply.  Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do.  Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}

/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/

/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);

	return (usbd_readl(udc, USBD_STATUS_REG) &
		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}

/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
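
/*
 * Note on the shutdown handshake (sketch): the !is_on path above sets
 * ep0_req_shutdown and then polls ep0state outside the lock; the ep0
 * worker acknowledges by setting ep0state = EP0_SHUTDOWN after an mb()
 * (see bcm63xx_ep0_do_idle()), which is what terminates the polling
 * loop here.
 */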

/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
			     struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB device.
 */
1845static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1846{
1847 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1848 unsigned long flags;
1849
1850 spin_lock_irqsave(&udc->lock, flags);
1851
1852 udc->driver = NULL;
1853
1854 /*
1855 * If we switch the PHY too abruptly after dropping D+, the host
1856 * will often complain:
1857 *
1858 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1859 */
1860 msleep(100);
1861
1862 bcm63xx_select_phy_mode(udc, false);
1863 set_clocks(udc, false);
1864
1865 spin_unlock_irqrestore(&udc->lock, flags);
1866
1867 return 0;
1868}
1869
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};

/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);

	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
			 USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}

/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ.  Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;

	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
		break;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		/* this should never happen */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(udc->dev,
			"received SETUP packet with invalid link speed\n");
		return 0;
	}

	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
		return 1;
	} else {
		return 0;
	}
}

/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, we do want to restore all wedged endpoints.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}

/**
 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the device controller.
 *
 * This is where we handle link (VBUS) down, USB reset, speed changes,
 * SET_CONFIGURATION, and SET_INTERFACE events.
 */
static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
{
	struct bcm63xx_udc *udc = dev_id;
	u32 stat;
	bool disconnected = false, bus_reset = false;

	stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
	       usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);

	usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);

	spin_lock(&udc->lock);
	if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
		/* VBUS toggled */

		if (!(usbd_readl(udc, USBD_EVENTS_REG) &
		      USBD_EVENTS_USB_LINK_MASK) &&
		    udc->gadget.speed != USB_SPEED_UNKNOWN)
			dev_info(udc->dev, "link down\n");

		udc->gadget.speed = USB_SPEED_UNKNOWN;
		disconnected = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bcm63xx_update_wedge(udc, false);

		udc->ep0_req_reset = 1;
		schedule_work(&udc->ep0_wq);
		bus_reset = true;
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
		if (bcm63xx_update_link_speed(udc)) {
			bcm63xx_fifo_setup(udc);
			bcm63xx_ep_setup(udc);
		}
		bcm63xx_update_wedge(udc, true);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_cfg = 1;
		schedule_work(&udc->ep0_wq);
	}
	if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
		bcm63xx_update_cfg_iface(udc);
		udc->ep0_req_set_iface = 1;
		schedule_work(&udc->ep0_wq);
	}
	spin_unlock(&udc->lock);

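	/*
	 * Invoke the gadget driver's disconnect/reset callbacks with the
	 * lock released, since they may call back into this driver.
	 */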
	if (disconnected && udc->driver)
		udc->driver->disconnect(&udc->gadget);
	else if (bus_reset && udc->driver)
		usb_gadget_udc_reset(&udc->gadget, udc->driver);

	return IRQ_HANDLED;
}

/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
 *
 * For the two ep0 channels, we have special handling that triggers the
 * ep0 worker thread.  For normal bulk/intr channels, either queue up
 * the next buffer descriptor for the transaction (incomplete transaction),
 * or invoke the completion callback (complete transaction).
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG, iudma->ch_idx);
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

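			/*
			 * The request is finished when it is completely
			 * filled, or when the hardware completed fewer
			 * bytes than were posted in the BDs (short packet).
			 */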
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

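	/*
	 * Unmap the DMA buffer and run the completion callback with the
	 * lock released; the callback may queue up further requests.
	 */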
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}

/***********************************************************************
 * Debug filesystem
 ***********************************************************************/

/*
 * bcm63xx_usbd_dbg_show - Show USBD controller state.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
 */
static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;

	if (!udc->driver)
		return -ENODEV;

	seq_printf(s, "ep0 state: %s\n",
		   bcm63xx_ep0_state_names[udc->ep0state]);
	seq_printf(s, "  pending requests: %s%s%s%s%s%s%s\n",
		   udc->ep0_req_reset ? "reset " : "",
		   udc->ep0_req_set_cfg ? "set_cfg " : "",
		   udc->ep0_req_set_iface ? "set_iface " : "",
		   udc->ep0_req_shutdown ? "shutdown " : "",
		   udc->ep0_request ? "pending " : "",
		   udc->ep0_req_completed ? "completed " : "",
		   udc->ep0_reply ? "reply " : "");
	seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
		   udc->cfg, udc->iface, udc->alt_iface);
	seq_printf(s, "regs:\n");
	seq_printf(s, "  control: %08x; straps: %08x; status: %08x\n",
		   usbd_readl(udc, USBD_CONTROL_REG),
		   usbd_readl(udc, USBD_STRAPS_REG),
		   usbd_readl(udc, USBD_STATUS_REG));
	seq_printf(s, "  events: %08x; stall: %08x\n",
		   usbd_readl(udc, USBD_EVENTS_REG),
		   usbd_readl(udc, USBD_STALL_REG));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);

/*
 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
 * @s: seq_file to which the information will be written.
 * @p: Unused.
 *
 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
 */
static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{
	struct bcm63xx_udc *udc = s->private;
	int ch_idx, i;
	u32 sram2, sram3;

	if (!udc->driver)
		return -ENODEV;

	for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
		struct iudma_ch *iudma = &udc->iudma[ch_idx];

		seq_printf(s, "IUDMA channel %d -- ", ch_idx);
		switch (iudma_defaults[ch_idx].ep_type) {
		case BCMEP_CTRL:
			seq_printf(s, "control");
			break;
		case BCMEP_BULK:
			seq_printf(s, "bulk");
			break;
		case BCMEP_INTR:
			seq_printf(s, "interrupt");
			break;
		}
		seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
		seq_printf(s, " [ep%d]:\n",
			   max_t(int, iudma_defaults[ch_idx].ep_num, 0));
		seq_printf(s, "  cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
			   usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
			   usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));

		sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
		sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
		seq_printf(s, "  base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
			   usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
			   sram2 >> 16, sram2 & 0xffff,
			   sram3 >> 16, sram3 & 0xffff,
			   usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
		seq_printf(s, "  desc: %d/%d used", iudma->n_bds_used,
			   iudma->n_bds);

		if (iudma->bep)
			seq_printf(s, "; %zu queued\n", list_count_nodes(&iudma->bep->queue));
		else
			seq_printf(s, "\n");

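		/* dump the BD ring, marking the current read/write slots */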
		for (i = 0; i < iudma->n_bds; i++) {
			struct bcm_enet_desc *d = &iudma->bd_ring[i];

			seq_printf(s, "  %03x (%02x): len_stat: %04x_%04x; pa %08x",
				   i * sizeof(*d), i,
				   d->len_stat >> 16, d->len_stat & 0xffff,
				   d->address);
			if (d == iudma->read_bd)
				seq_printf(s, "   <<RD");
			if (d == iudma->write_bd)
				seq_printf(s, " <<WR");
			seq_printf(s, "\n");
		}

		seq_printf(s, "\n");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);

/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
	debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
}

/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
}

/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

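	/* fallback error code for the devm_request_irq() failures below */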
	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		rc = irq;
		goto out_uninit;
	}
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0)
		goto report_request_failure;

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0) {
			rc = irq;
			goto out_uninit;
		}
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0)
			goto report_request_failure;
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;

report_request_failure:
	dev_err(dev, "error requesting IRQ #%d\n", irq);
	goto out_uninit;
}

/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static void bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);
}

static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove_new	= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
4 *
5 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
6 * Copyright (C) 2012 Broadcom Corporation
7 */
8
9#include <linux/bitops.h>
10#include <linux/bug.h>
11#include <linux/clk.h>
12#include <linux/compiler.h>
13#include <linux/debugfs.h>
14#include <linux/delay.h>
15#include <linux/device.h>
16#include <linux/dma-mapping.h>
17#include <linux/errno.h>
18#include <linux/interrupt.h>
19#include <linux/ioport.h>
20#include <linux/kernel.h>
21#include <linux/list.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/platform_device.h>
25#include <linux/sched.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/timer.h>
29#include <linux/usb/ch9.h>
30#include <linux/usb/gadget.h>
31#include <linux/workqueue.h>
32
33#include <bcm63xx_cpu.h>
34#include <bcm63xx_iudma.h>
35#include <bcm63xx_dev_usb_usbd.h>
36#include <bcm63xx_io.h>
37#include <bcm63xx_regs.h>
38
39#define DRV_MODULE_NAME "bcm63xx_udc"
40
41static const char bcm63xx_ep0name[] = "ep0";
42
43static const struct {
44 const char *name;
45 const struct usb_ep_caps caps;
46} bcm63xx_ep_info[] = {
47#define EP_INFO(_name, _caps) \
48 { \
49 .name = _name, \
50 .caps = _caps, \
51 }
52
53 EP_INFO(bcm63xx_ep0name,
54 USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
55 EP_INFO("ep1in-bulk",
56 USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
57 EP_INFO("ep2out-bulk",
58 USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
59 EP_INFO("ep3in-int",
60 USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
61 EP_INFO("ep4out-int",
62 USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
63
64#undef EP_INFO
65};
66
67static bool use_fullspeed;
68module_param(use_fullspeed, bool, S_IRUGO);
69MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
70
71/*
72 * RX IRQ coalescing options:
73 *
74 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
75 * driver is able to pass the "testusb" suite and recover from conditions like:
76 *
77 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
78 * 2) Host sends 512 bytes of data
79 * 3) Host decides to reconfigure the device and sends SET_INTERFACE
80 * 4) Device shuts down the endpoint and cancels the RX transaction
81 *
82 * true - one IRQ per transfer, for transfers <= 2048B. Generates
83 * considerably fewer IRQs, but error recovery is less robust. Does not
84 * reliably pass "testusb".
85 *
86 * TX always uses coalescing, because we can cancel partially complete TX
87 * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
88 * this on RX.
89 */
90static bool irq_coalesce;
91module_param(irq_coalesce, bool, S_IRUGO);
92MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
93
94#define BCM63XX_NUM_EP 5
95#define BCM63XX_NUM_IUDMA 6
96#define BCM63XX_NUM_FIFO_PAIRS 3
97
98#define IUDMA_RESET_TIMEOUT_US 10000
99
100#define IUDMA_EP0_RXCHAN 0
101#define IUDMA_EP0_TXCHAN 1
102
103#define IUDMA_MAX_FRAGMENT 2048
104#define BCM63XX_MAX_CTRL_PKT 64
105
106#define BCMEP_CTRL 0x00
107#define BCMEP_ISOC 0x01
108#define BCMEP_BULK 0x02
109#define BCMEP_INTR 0x03
110
111#define BCMEP_OUT 0x00
112#define BCMEP_IN 0x01
113
114#define BCM63XX_SPD_FULL 1
115#define BCM63XX_SPD_HIGH 0
116
117#define IUDMA_DMAC_OFFSET 0x200
118#define IUDMA_DMAS_OFFSET 0x400
119
120enum bcm63xx_ep0_state {
121 EP0_REQUEUE,
122 EP0_IDLE,
123 EP0_IN_DATA_PHASE_SETUP,
124 EP0_IN_DATA_PHASE_COMPLETE,
125 EP0_OUT_DATA_PHASE_SETUP,
126 EP0_OUT_DATA_PHASE_COMPLETE,
127 EP0_OUT_STATUS_PHASE,
128 EP0_IN_FAKE_STATUS_PHASE,
129 EP0_SHUTDOWN,
130};
131
132static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
133 "REQUEUE",
134 "IDLE",
135 "IN_DATA_PHASE_SETUP",
136 "IN_DATA_PHASE_COMPLETE",
137 "OUT_DATA_PHASE_SETUP",
138 "OUT_DATA_PHASE_COMPLETE",
139 "OUT_STATUS_PHASE",
140 "IN_FAKE_STATUS_PHASE",
141 "SHUTDOWN",
142};
143
144/**
145 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
146 * @ep_num: USB endpoint number.
147 * @n_bds: Number of buffer descriptors in the ring.
148 * @ep_type: Endpoint type (control, bulk, interrupt).
149 * @dir: Direction (in, out).
150 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
151 * @max_pkt_hs: Maximum packet size in high speed mode.
152 * @max_pkt_fs: Maximum packet size in full speed mode.
153 */
154struct iudma_ch_cfg {
155 int ep_num;
156 int n_bds;
157 int ep_type;
158 int dir;
159 int n_fifo_slots;
160 int max_pkt_hs;
161 int max_pkt_fs;
162};
163
164static const struct iudma_ch_cfg iudma_defaults[] = {
165
166 /* This controller was designed to support a CDC/RNDIS application.
167 It may be possible to reconfigure some of the endpoints, but
168 the hardware limitations (FIFO sizing and number of DMA channels)
169 may significantly impact flexibility and/or stability. Change
170 these values at your own risk.
171
172 ep_num ep_type n_fifo_slots max_pkt_fs
173 idx | n_bds | dir | max_pkt_hs |
174 | | | | | | | | */
175 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
176 [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
177 [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
178 [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
179 [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
180 [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
181};
182
183struct bcm63xx_udc;
184
185/**
186 * struct iudma_ch - Represents the current state of a single IUDMA channel.
187 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
188 * @ep_num: USB endpoint number. -1 for ep0 RX.
189 * @enabled: Whether bcm63xx_ep_enable() has been called.
190 * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
191 * @is_tx: true for TX, false for RX.
192 * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
193 * @udc: Reference to the device controller.
194 * @read_bd: Next buffer descriptor to reap from the hardware.
195 * @write_bd: Next BD available for a new packet.
196 * @end_bd: Points to the final BD in the ring.
197 * @n_bds_used: Number of BD entries currently occupied.
198 * @bd_ring: Base pointer to the BD ring.
199 * @bd_ring_dma: Physical (DMA) address of bd_ring.
200 * @n_bds: Total number of BDs in the ring.
201 *
202 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
203 * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
204 * only.
205 *
206 * Each bulk/intr endpoint has a single IUDMA channel and a single
207 * struct usb_ep.
208 */
209struct iudma_ch {
210 unsigned int ch_idx;
211 int ep_num;
212 bool enabled;
213 int max_pkt;
214 bool is_tx;
215 struct bcm63xx_ep *bep;
216 struct bcm63xx_udc *udc;
217
218 struct bcm_enet_desc *read_bd;
219 struct bcm_enet_desc *write_bd;
220 struct bcm_enet_desc *end_bd;
221 int n_bds_used;
222
223 struct bcm_enet_desc *bd_ring;
224 dma_addr_t bd_ring_dma;
225 unsigned int n_bds;
226};
227
228/**
229 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
230 * @ep_num: USB endpoint number.
231 * @iudma: Pointer to IUDMA channel state.
232 * @ep: USB gadget layer representation of the EP.
233 * @udc: Reference to the device controller.
234 * @queue: Linked list of outstanding requests for this EP.
235 * @halted: 1 if the EP is stalled; 0 otherwise.
236 */
237struct bcm63xx_ep {
238 unsigned int ep_num;
239 struct iudma_ch *iudma;
240 struct usb_ep ep;
241 struct bcm63xx_udc *udc;
242 struct list_head queue;
243 unsigned halted:1;
244};
245
246/**
247 * struct bcm63xx_req - Internal (driver) state of a single request.
248 * @queue: Links back to the EP's request list.
249 * @req: USB gadget layer representation of the request.
250 * @offset: Current byte offset into the data buffer (next byte to queue).
251 * @bd_bytes: Number of data bytes in outstanding BD entries.
252 * @iudma: IUDMA channel used for the request.
253 */
254struct bcm63xx_req {
255 struct list_head queue; /* ep's requests */
256 struct usb_request req;
257 unsigned int offset;
258 unsigned int bd_bytes;
259 struct iudma_ch *iudma;
260};
261
262/**
263 * struct bcm63xx_udc - Driver/hardware private context.
264 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
265 * @dev: Generic Linux device structure.
266 * @pd: Platform data (board/port info).
267 * @usbd_clk: Clock descriptor for the USB device block.
268 * @usbh_clk: Clock descriptor for the USB host block.
269 * @gadget: USB slave device.
270 * @driver: Driver for USB slave devices.
271 * @usbd_regs: Base address of the USBD/USB20D block.
272 * @iudma_regs: Base address of the USBD's associated IUDMA block.
273 * @bep: Array of endpoints, including ep0.
274 * @iudma: Array of all IUDMA channels used by this controller.
275 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
276 * @iface: USB interface number, from SET_INTERFACE wIndex.
277 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
278 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
279 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
280 * @ep0state: Current state of the ep0 state machine.
281 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
282 * @wedgemap: Bitmap of wedged endpoints.
283 * @ep0_req_reset: USB reset is pending.
284 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
285 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
286 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
287 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
288 * @ep0_reply: Pending reply from gadget driver.
289 * @ep0_request: Outstanding ep0 request.
290 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
291 * @debugfs_usbd: debugfs file "usbd" for controller state.
292 * @debugfs_iudma: debugfs file "usbd" for IUDMA state.
293 */
294struct bcm63xx_udc {
295 spinlock_t lock;
296
297 struct device *dev;
298 struct bcm63xx_usbd_platform_data *pd;
299 struct clk *usbd_clk;
300 struct clk *usbh_clk;
301
302 struct usb_gadget gadget;
303 struct usb_gadget_driver *driver;
304
305 void __iomem *usbd_regs;
306 void __iomem *iudma_regs;
307
308 struct bcm63xx_ep bep[BCM63XX_NUM_EP];
309 struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
310
311 int cfg;
312 int iface;
313 int alt_iface;
314
315 struct bcm63xx_req ep0_ctrl_req;
316 u8 *ep0_ctrl_buf;
317
318 int ep0state;
319 struct work_struct ep0_wq;
320
321 unsigned long wedgemap;
322
323 unsigned ep0_req_reset:1;
324 unsigned ep0_req_set_cfg:1;
325 unsigned ep0_req_set_iface:1;
326 unsigned ep0_req_shutdown:1;
327
328 unsigned ep0_req_completed:1;
329 struct usb_request *ep0_reply;
330 struct usb_request *ep0_request;
331
332 struct dentry *debugfs_root;
333 struct dentry *debugfs_usbd;
334 struct dentry *debugfs_iudma;
335};
336
337static const struct usb_ep_ops bcm63xx_udc_ep_ops;
338
339/***********************************************************************
340 * Convenience functions
341 ***********************************************************************/
342
343static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
344{
345 return container_of(g, struct bcm63xx_udc, gadget);
346}
347
348static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
349{
350 return container_of(ep, struct bcm63xx_ep, ep);
351}
352
353static inline struct bcm63xx_req *our_req(struct usb_request *req)
354{
355 return container_of(req, struct bcm63xx_req, req);
356}
357
358static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
359{
360 return bcm_readl(udc->usbd_regs + off);
361}
362
363static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
364{
365 bcm_writel(val, udc->usbd_regs + off);
366}
367
368static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
369{
370 return bcm_readl(udc->iudma_regs + off);
371}
372
373static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
374{
375 bcm_writel(val, udc->iudma_regs + off);
376}
377
378static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
379{
380 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
381 (ENETDMA_CHAN_WIDTH * chan));
382}
383
384static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
385 int chan)
386{
387 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
388 (ENETDMA_CHAN_WIDTH * chan));
389}
390
391static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
392{
393 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
394 (ENETDMA_CHAN_WIDTH * chan));
395}
396
397static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
398 int chan)
399{
400 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
401 (ENETDMA_CHAN_WIDTH * chan));
402}
403
404static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
405{
406 if (is_enabled) {
407 clk_enable(udc->usbh_clk);
408 clk_enable(udc->usbd_clk);
409 udelay(10);
410 } else {
411 clk_disable(udc->usbd_clk);
412 clk_disable(udc->usbh_clk);
413 }
414}
415
416/***********************************************************************
417 * Low-level IUDMA / FIFO operations
418 ***********************************************************************/
419
420/**
421 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
422 * @udc: Reference to the device controller.
423 * @idx: Desired init_sel value.
424 *
425 * The "init_sel" signal is used as a selection index for both endpoints
426 * and IUDMA channels. Since these do not map 1:1, the use of this signal
427 * depends on the context.
428 */
429static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
430{
431 u32 val = usbd_readl(udc, USBD_CONTROL_REG);
432
433 val &= ~USBD_CONTROL_INIT_SEL_MASK;
434 val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
435 usbd_writel(udc, val, USBD_CONTROL_REG);
436}
437
438/**
439 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
440 * @udc: Reference to the device controller.
441 * @bep: Endpoint on which to operate.
442 * @is_stalled: true to enable stall, false to disable.
443 *
444 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
445 * halt/stall conditions.
446 */
447static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
448 bool is_stalled)
449{
450 u32 val;
451
452 val = USBD_STALL_UPDATE_MASK |
453 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
454 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
455 usbd_writel(udc, val, USBD_STALL_REG);
456}
457
458/**
459 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
460 * @udc: Reference to the device controller.
461 *
462 * These parameters depend on the USB link speed. Settings are
463 * per-IUDMA-channel-pair.
464 */
465static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
466{
467 int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
468 u32 i, val, rx_fifo_slot, tx_fifo_slot;
469
470 /* set up FIFO boundaries and packet sizes; this is done in pairs */
471 rx_fifo_slot = tx_fifo_slot = 0;
472 for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
473 const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
474 const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
475
476 bcm63xx_ep_dma_select(udc, i >> 1);
477
478 val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
479 ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
480 USBD_RXFIFO_CONFIG_END_SHIFT);
481 rx_fifo_slot += rx_cfg->n_fifo_slots;
482 usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
483 usbd_writel(udc,
484 is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
485 USBD_RXFIFO_EPSIZE_REG);
486
487 val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
488 ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
489 USBD_TXFIFO_CONFIG_END_SHIFT);
490 tx_fifo_slot += tx_cfg->n_fifo_slots;
491 usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
492 usbd_writel(udc,
493 is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
494 USBD_TXFIFO_EPSIZE_REG);
495
496 usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
497 }
498}
499
500/**
501 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
502 * @udc: Reference to the device controller.
503 * @ep_num: Endpoint number.
504 */
505static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
506{
507 u32 val;
508
509 bcm63xx_ep_dma_select(udc, ep_num);
510
511 val = usbd_readl(udc, USBD_CONTROL_REG);
512 val |= USBD_CONTROL_FIFO_RESET_MASK;
513 usbd_writel(udc, val, USBD_CONTROL_REG);
514 usbd_readl(udc, USBD_CONTROL_REG);
515}
516
517/**
518 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
519 * @udc: Reference to the device controller.
520 */
521static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
522{
523 int i;
524
525 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
526 bcm63xx_fifo_reset_ep(udc, i);
527}
528
529/**
530 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
531 * @udc: Reference to the device controller.
532 */
533static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
534{
535 u32 i, val;
536
537 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
538 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
539
540 if (cfg->ep_num < 0)
541 continue;
542
543 bcm63xx_ep_dma_select(udc, cfg->ep_num);
544 val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
545 ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
546 usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
547 }
548}
549
550/**
551 * bcm63xx_ep_setup - Configure per-endpoint settings.
552 * @udc: Reference to the device controller.
553 *
554 * This needs to be rerun if the speed/cfg/intf/altintf changes.
555 */
556static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
557{
558 u32 val, i;
559
560 usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
561
562 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
563 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
564 int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
565 cfg->max_pkt_hs : cfg->max_pkt_fs;
566 int idx = cfg->ep_num;
567
568 udc->iudma[i].max_pkt = max_pkt;
569
570 if (idx < 0)
571 continue;
572 usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);
573
574 val = (idx << USBD_CSR_EP_LOG_SHIFT) |
575 (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
576 (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
577 (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
578 (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
579 (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
580 (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
581 usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
582 }
583}
584
585/**
586 * iudma_write - Queue a single IUDMA transaction.
587 * @udc: Reference to the device controller.
588 * @iudma: IUDMA channel to use.
589 * @breq: Request containing the transaction data.
590 *
591 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
592 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
593 * So iudma_write() may be called several times to fulfill a single
594 * usb_request.
595 *
596 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
597 */
598static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
599 struct bcm63xx_req *breq)
600{
601 int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
602 unsigned int bytes_left = breq->req.length - breq->offset;
603 const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
604 iudma->max_pkt : IUDMA_MAX_FRAGMENT;
605
606 iudma->n_bds_used = 0;
607 breq->bd_bytes = 0;
608 breq->iudma = iudma;
609
610 if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
611 extra_zero_pkt = 1;
612
613 do {
614 struct bcm_enet_desc *d = iudma->write_bd;
615 u32 dmaflags = 0;
616 unsigned int n_bytes;
617
618 if (d == iudma->end_bd) {
619 dmaflags |= DMADESC_WRAP_MASK;
620 iudma->write_bd = iudma->bd_ring;
621 } else {
622 iudma->write_bd++;
623 }
624 iudma->n_bds_used++;
625
626 n_bytes = min_t(int, bytes_left, max_bd_bytes);
627 if (n_bytes)
628 dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
629 else
630 dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
631 DMADESC_USB_ZERO_MASK;
632
633 dmaflags |= DMADESC_OWNER_MASK;
634 if (first_bd) {
635 dmaflags |= DMADESC_SOP_MASK;
636 first_bd = 0;
637 }
638
639 /*
640 * extra_zero_pkt forces one more iteration through the loop
641 * after all data is queued up, to send the zero packet
642 */
643 if (extra_zero_pkt && !bytes_left)
644 extra_zero_pkt = 0;
645
646 if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
647 (n_bytes == bytes_left && !extra_zero_pkt)) {
648 last_bd = 1;
649 dmaflags |= DMADESC_EOP_MASK;
650 }
651
652 d->address = breq->req.dma + breq->offset;
653 mb();
654 d->len_stat = dmaflags;
655
656 breq->offset += n_bytes;
657 breq->bd_bytes += n_bytes;
658 bytes_left -= n_bytes;
659 } while (!last_bd);
660
661 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
662 ENETDMAC_CHANCFG_REG, iudma->ch_idx);
663}
664
665/**
666 * iudma_read - Check for IUDMA buffer completion.
667 * @udc: Reference to the device controller.
668 * @iudma: IUDMA channel to use.
669 *
670 * This checks to see if ALL of the outstanding BDs on the DMA channel
671 * have been filled. If so, it returns the actual transfer length;
672 * otherwise it returns -EBUSY.
673 */
674static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
675{
676 int i, actual_len = 0;
677 struct bcm_enet_desc *d = iudma->read_bd;
678
679 if (!iudma->n_bds_used)
680 return -EINVAL;
681
682 for (i = 0; i < iudma->n_bds_used; i++) {
683 u32 dmaflags;
684
685 dmaflags = d->len_stat;
686
687 if (dmaflags & DMADESC_OWNER_MASK)
688 return -EBUSY;
689
690 actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
691 DMADESC_LENGTH_SHIFT;
692 if (d == iudma->end_bd)
693 d = iudma->bd_ring;
694 else
695 d++;
696 }
697
698 iudma->read_bd = d;
699 iudma->n_bds_used = 0;
700 return actual_len;
701}
702
703/**
704 * iudma_reset_channel - Stop DMA on a single channel.
705 * @udc: Reference to the device controller.
706 * @iudma: IUDMA channel to reset.
707 */
708static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
709{
710 int timeout = IUDMA_RESET_TIMEOUT_US;
711 struct bcm_enet_desc *d;
712 int ch_idx = iudma->ch_idx;
713
714 if (!iudma->is_tx)
715 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
716
717 /* stop DMA, then wait for the hardware to wrap up */
718 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
719
720 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
721 ENETDMAC_CHANCFG_EN_MASK) {
722 udelay(1);
723
724 /* repeatedly flush the FIFO data until the BD completes */
725 if (iudma->is_tx && iudma->ep_num >= 0)
726 bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
727
728 if (!timeout--) {
729 dev_err(udc->dev, "can't reset IUDMA channel %d\n",
730 ch_idx);
731 break;
732 }
733 if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
734 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
735 ch_idx);
736 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
737 ENETDMAC_CHANCFG_REG, ch_idx);
738 }
739 }
740 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
741
742 /* don't leave "live" HW-owned entries for the next guy to step on */
743 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
744 d->len_stat = 0;
745 mb();
746
747 iudma->read_bd = iudma->write_bd = iudma->bd_ring;
748 iudma->n_bds_used = 0;
749
750 /* set up IRQs, UBUS burst size, and BD base for this channel */
751 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
752 ENETDMAC_IRMASK_REG, ch_idx);
753 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
754
755 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
756 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
757}
758
759/**
760 * iudma_init_channel - One-time IUDMA channel initialization.
761 * @udc: Reference to the device controller.
762 * @ch_idx: Channel to initialize.
763 */
764static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
765{
766 struct iudma_ch *iudma = &udc->iudma[ch_idx];
767 const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
768 unsigned int n_bds = cfg->n_bds;
769 struct bcm63xx_ep *bep = NULL;
770
771 iudma->ep_num = cfg->ep_num;
772 iudma->ch_idx = ch_idx;
773 iudma->is_tx = !!(ch_idx & 0x01);
774 if (iudma->ep_num >= 0) {
775 bep = &udc->bep[iudma->ep_num];
776 bep->iudma = iudma;
777 INIT_LIST_HEAD(&bep->queue);
778 }
779
780 iudma->bep = bep;
781 iudma->udc = udc;
782
783 /* ep0 is always active; others are controlled by the gadget driver */
784 if (iudma->ep_num <= 0)
785 iudma->enabled = true;
786
787 iudma->n_bds = n_bds;
788 iudma->bd_ring = dmam_alloc_coherent(udc->dev,
789 n_bds * sizeof(struct bcm_enet_desc),
790 &iudma->bd_ring_dma, GFP_KERNEL);
791 if (!iudma->bd_ring)
792 return -ENOMEM;
793 iudma->end_bd = &iudma->bd_ring[n_bds - 1];
794
795 return 0;
796}
797
798/**
799 * iudma_init - One-time initialization of all IUDMA channels.
800 * @udc: Reference to the device controller.
801 *
802 * Enable DMA, flush channels, and enable global IUDMA IRQs.
803 */
804static int iudma_init(struct bcm63xx_udc *udc)
805{
806 int i, rc;
807
808 usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
809
810 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
811 rc = iudma_init_channel(udc, i);
812 if (rc)
813 return rc;
814 iudma_reset_channel(udc, &udc->iudma[i]);
815 }
816
817 usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
818 return 0;
819}
820
821/**
822 * iudma_uninit - Uninitialize IUDMA channels.
823 * @udc: Reference to the device controller.
824 *
825 * Kill global IUDMA IRQs, flush channels, and kill DMA.
826 */
827static void iudma_uninit(struct bcm63xx_udc *udc)
828{
829 int i;
830
831 usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
832
833 for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
834 iudma_reset_channel(udc, &udc->iudma[i]);
835
836 usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
837}
838
839/***********************************************************************
840 * Other low-level USBD operations
841 ***********************************************************************/
842
843/**
844 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
845 * @udc: Reference to the device controller.
846 * @enable_irqs: true to enable, false to disable.
847 */
848static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
849{
850 u32 val;
851
852 usbd_writel(udc, 0, USBD_STATUS_REG);
853
854 val = BIT(USBD_EVENT_IRQ_USB_RESET) |
855 BIT(USBD_EVENT_IRQ_SETUP) |
856 BIT(USBD_EVENT_IRQ_SETCFG) |
857 BIT(USBD_EVENT_IRQ_SETINTF) |
858 BIT(USBD_EVENT_IRQ_USB_LINK);
859 usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
860 usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
861}
862
863/**
864 * bcm63xx_select_phy_mode - Select between USB device and host mode.
865 * @udc: Reference to the device controller.
866 * @is_device: true for device, false for host.
867 *
868 * This should probably be reworked to use the drivers/usb/otg
869 * infrastructure.
870 *
871 * By default, the AFE/pullups are disabled in device mode, until
872 * bcm63xx_select_pullup() is called.
873 */
874static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
875{
876 u32 val, portmask = BIT(udc->pd->port_no);
877
878 if (BCMCPU_IS_6328()) {
879 /* configure pinmux to sense VBUS signal */
880 val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
881 val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
882 val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
883 GPIO_PINMUX_OTHR_6328_USB_HOST;
884 bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
885 }
886
887 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
888 if (is_device) {
889 val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
890 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
891 } else {
892 val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
893 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
894 }
895 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
896
897 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
898 if (is_device)
899 val |= USBH_PRIV_SWAP_USBD_MASK;
900 else
901 val &= ~USBH_PRIV_SWAP_USBD_MASK;
902 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
903}
904
905/**
906 * bcm63xx_select_pullup - Enable/disable the pullup on D+
907 * @udc: Reference to the device controller.
908 * @is_on: true to enable the pullup, false to disable.
909 *
910 * If the pullup is active, the host will sense a FS/HS device connected to
911 * the port. If the pullup is inactive, the host will think the USB
912 * device has been disconnected.
913 */
914static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
915{
916 u32 val, portmask = BIT(udc->pd->port_no);
917
918 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
919 if (is_on)
920 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
921 else
922 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
923 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
924}
925
926/**
927 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
928 * @udc: Reference to the device controller.
929 *
930 * This just masks the IUDMA IRQs and releases the clocks. It is assumed
931 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
932 */
933static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
934{
935 set_clocks(udc, true);
936 iudma_uninit(udc);
937 set_clocks(udc, false);
938
939 clk_put(udc->usbd_clk);
940 clk_put(udc->usbh_clk);
941}
942
943/**
944 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
945 * @udc: Reference to the device controller.
946 */
947static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
948{
949 int i, rc = 0;
950 u32 val;
951
952 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
953 GFP_KERNEL);
954 if (!udc->ep0_ctrl_buf)
955 return -ENOMEM;
956
957 INIT_LIST_HEAD(&udc->gadget.ep_list);
958 for (i = 0; i < BCM63XX_NUM_EP; i++) {
959 struct bcm63xx_ep *bep = &udc->bep[i];
960
961 bep->ep.name = bcm63xx_ep_info[i].name;
962 bep->ep.caps = bcm63xx_ep_info[i].caps;
963 bep->ep_num = i;
964 bep->ep.ops = &bcm63xx_udc_ep_ops;
965 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
966 bep->halted = 0;
967 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
968 bep->udc = udc;
969 bep->ep.desc = NULL;
970 INIT_LIST_HEAD(&bep->queue);
971 }
972
973 udc->gadget.ep0 = &udc->bep[0].ep;
974 list_del(&udc->bep[0].ep.ep_list);
975
976 udc->gadget.speed = USB_SPEED_UNKNOWN;
977 udc->ep0state = EP0_SHUTDOWN;
978
979 udc->usbh_clk = clk_get(udc->dev, "usbh");
980 if (IS_ERR(udc->usbh_clk))
981 return -EIO;
982
983 udc->usbd_clk = clk_get(udc->dev, "usbd");
984 if (IS_ERR(udc->usbd_clk)) {
985 clk_put(udc->usbh_clk);
986 return -EIO;
987 }
988
989 set_clocks(udc, true);
990
991 val = USBD_CONTROL_AUTO_CSRS_MASK |
992 USBD_CONTROL_DONE_CSRS_MASK |
993 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
994 usbd_writel(udc, val, USBD_CONTROL_REG);
995
996 val = USBD_STRAPS_APP_SELF_PWR_MASK |
997 USBD_STRAPS_APP_RAM_IF_MASK |
998 USBD_STRAPS_APP_CSRPRGSUP_MASK |
999 USBD_STRAPS_APP_8BITPHY_MASK |
1000 USBD_STRAPS_APP_RMTWKUP_MASK;
1001
1002 if (udc->gadget.max_speed == USB_SPEED_HIGH)
1003 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
1004 else
1005 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
1006 usbd_writel(udc, val, USBD_STRAPS_REG);
1007
1008 bcm63xx_set_ctrl_irqs(udc, false);
1009
1010 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
1011
1012 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
1013 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
1014 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1015
1016 rc = iudma_init(udc);
1017 set_clocks(udc, false);
1018 if (rc)
1019 bcm63xx_uninit_udc_hw(udc);
1020
1021 return 0;
1022}
1023
1024/***********************************************************************
1025 * Standard EP gadget operations
1026 ***********************************************************************/
1027
1028/**
1029 * bcm63xx_ep_enable - Enable one endpoint.
1030 * @ep: Endpoint to enable.
1031 * @desc: Contains max packet, direction, etc.
1032 *
1033 * Most of the endpoint parameters are fixed in this controller, so there
1034 * isn't much for this function to do.
1035 */
1036static int bcm63xx_ep_enable(struct usb_ep *ep,
1037 const struct usb_endpoint_descriptor *desc)
1038{
1039 struct bcm63xx_ep *bep = our_ep(ep);
1040 struct bcm63xx_udc *udc = bep->udc;
1041 struct iudma_ch *iudma = bep->iudma;
1042 unsigned long flags;
1043
1044 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1045 return -EINVAL;
1046
1047 if (!udc->driver)
1048 return -ESHUTDOWN;
1049
1050 spin_lock_irqsave(&udc->lock, flags);
1051 if (iudma->enabled) {
1052 spin_unlock_irqrestore(&udc->lock, flags);
1053 return -EINVAL;
1054 }
1055
1056 iudma->enabled = true;
1057 BUG_ON(!list_empty(&bep->queue));
1058
1059 iudma_reset_channel(udc, iudma);
1060
1061 bep->halted = 0;
1062 bcm63xx_set_stall(udc, bep, false);
1063 clear_bit(bep->ep_num, &udc->wedgemap);
1064
1065 ep->desc = desc;
1066 ep->maxpacket = usb_endpoint_maxp(desc);
1067
1068 spin_unlock_irqrestore(&udc->lock, flags);
1069 return 0;
1070}
1071
1072/**
1073 * bcm63xx_ep_disable - Disable one endpoint.
1074 * @ep: Endpoint to disable.
1075 */
1076static int bcm63xx_ep_disable(struct usb_ep *ep)
1077{
1078 struct bcm63xx_ep *bep = our_ep(ep);
1079 struct bcm63xx_udc *udc = bep->udc;
1080 struct iudma_ch *iudma = bep->iudma;
1081 struct bcm63xx_req *breq, *n;
1082 unsigned long flags;
1083
1084 if (!ep || !ep->desc)
1085 return -EINVAL;
1086
1087 spin_lock_irqsave(&udc->lock, flags);
1088 if (!iudma->enabled) {
1089 spin_unlock_irqrestore(&udc->lock, flags);
1090 return -EINVAL;
1091 }
1092 iudma->enabled = false;
1093
1094 iudma_reset_channel(udc, iudma);
1095
1096 if (!list_empty(&bep->queue)) {
1097 list_for_each_entry_safe(breq, n, &bep->queue, queue) {
1098 usb_gadget_unmap_request(&udc->gadget, &breq->req,
1099 iudma->is_tx);
1100 list_del(&breq->queue);
1101 breq->req.status = -ESHUTDOWN;
1102
1103 spin_unlock_irqrestore(&udc->lock, flags);
1104 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
1105 spin_lock_irqsave(&udc->lock, flags);
1106 }
1107 }
1108 ep->desc = NULL;
1109
1110 spin_unlock_irqrestore(&udc->lock, flags);
1111 return 0;
1112}
1113
1114/**
1115 * bcm63xx_udc_alloc_request - Allocate a new request.
1116 * @ep: Endpoint associated with the request.
1117 * @mem_flags: Flags to pass to kzalloc().
1118 */
1119static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1120 gfp_t mem_flags)
1121{
1122 struct bcm63xx_req *breq;
1123
1124 breq = kzalloc(sizeof(*breq), mem_flags);
1125 if (!breq)
1126 return NULL;
1127 return &breq->req;
1128}
1129
1130/**
1131 * bcm63xx_udc_free_request - Free a request.
1132 * @ep: Endpoint associated with the request.
1133 * @req: Request to free.
1134 */
1135static void bcm63xx_udc_free_request(struct usb_ep *ep,
1136 struct usb_request *req)
1137{
1138 struct bcm63xx_req *breq = our_req(req);
1139 kfree(breq);
1140}
1141
1142/**
1143 * bcm63xx_udc_queue - Queue up a new request.
1144 * @ep: Endpoint associated with the request.
1145 * @req: Request to add.
1146 * @mem_flags: Unused.
1147 *
1148 * If the queue is empty, start this request immediately. Otherwise, add
1149 * it to the list.
1150 *
1151 * ep0 replies are sent through this function from the gadget driver, but
1152 * they are treated differently because they need to be handled by the ep0
1153 * state machine. (Sometimes they are replies to control requests that
1154 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1155 */
1156static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1157 gfp_t mem_flags)
1158{
1159 struct bcm63xx_ep *bep = our_ep(ep);
1160 struct bcm63xx_udc *udc = bep->udc;
1161 struct bcm63xx_req *breq = our_req(req);
1162 unsigned long flags;
1163 int rc = 0;
1164
1165 if (unlikely(!req || !req->complete || !req->buf || !ep))
1166 return -EINVAL;
1167
1168 req->actual = 0;
1169 req->status = 0;
1170 breq->offset = 0;
1171
1172 if (bep == &udc->bep[0]) {
1173 /* only one reply per request, please */
1174 if (udc->ep0_reply)
1175 return -EINVAL;
1176
1177 udc->ep0_reply = req;
1178 schedule_work(&udc->ep0_wq);
1179 return 0;
1180 }
1181
1182 spin_lock_irqsave(&udc->lock, flags);
1183 if (!bep->iudma->enabled) {
1184 rc = -ESHUTDOWN;
1185 goto out;
1186 }
1187
1188 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1189 if (rc == 0) {
1190 list_add_tail(&breq->queue, &bep->queue);
1191 if (list_is_singular(&bep->queue))
1192 iudma_write(udc, bep->iudma, breq);
1193 }
1194
1195out:
1196 spin_unlock_irqrestore(&udc->lock, flags);
1197 return rc;
1198}
1199
1200/**
1201 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1202 * @ep: Endpoint associated with the request.
1203 * @req: Request to remove.
1204 *
1205 * If the request is not at the head of the queue, this is easy - just nuke
1206 * it. If the request is at the head of the queue, we'll need to stop the
1207 * DMA transaction and then queue up the successor.
1208 */
1209static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1210{
1211 struct bcm63xx_ep *bep = our_ep(ep);
1212 struct bcm63xx_udc *udc = bep->udc;
1213 struct bcm63xx_req *breq = our_req(req), *cur;
1214 unsigned long flags;
1215 int rc = 0;
1216
1217 spin_lock_irqsave(&udc->lock, flags);
1218 if (list_empty(&bep->queue)) {
1219 rc = -EINVAL;
1220 goto out;
1221 }
1222
1223 cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1224 usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1225
1226 if (breq == cur) {
1227 iudma_reset_channel(udc, bep->iudma);
1228 list_del(&breq->queue);
1229
1230 if (!list_empty(&bep->queue)) {
1231 struct bcm63xx_req *next;
1232
1233 next = list_first_entry(&bep->queue,
1234 struct bcm63xx_req, queue);
1235 iudma_write(udc, bep->iudma, next);
1236 }
1237 } else {
1238 list_del(&breq->queue);
1239 }
1240
1241out:
1242 spin_unlock_irqrestore(&udc->lock, flags);
1243
1244 req->status = -ESHUTDOWN;
1245 req->complete(ep, req);
1246
1247 return rc;
1248}
1249
1250/**
1251 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1252 * @ep: Endpoint to halt.
1253 * @value: Zero to clear halt; nonzero to set halt.
1254 *
1255 * See comments in bcm63xx_update_wedge().
1256 */
1257static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1258{
1259 struct bcm63xx_ep *bep = our_ep(ep);
1260 struct bcm63xx_udc *udc = bep->udc;
1261 unsigned long flags;
1262
1263 spin_lock_irqsave(&udc->lock, flags);
1264 bcm63xx_set_stall(udc, bep, !!value);
1265 bep->halted = value;
1266 spin_unlock_irqrestore(&udc->lock, flags);
1267
1268 return 0;
1269}
1270
1271/**
1272 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1273 * @ep: Endpoint to wedge.
1274 *
1275 * See comments in bcm63xx_update_wedge().
1276 */
1277static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1278{
1279 struct bcm63xx_ep *bep = our_ep(ep);
1280 struct bcm63xx_udc *udc = bep->udc;
1281 unsigned long flags;
1282
1283 spin_lock_irqsave(&udc->lock, flags);
1284 set_bit(bep->ep_num, &udc->wedgemap);
1285 bcm63xx_set_stall(udc, bep, true);
1286 spin_unlock_irqrestore(&udc->lock, flags);
1287
1288 return 0;
1289}
1290
1291static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1292 .enable = bcm63xx_ep_enable,
1293 .disable = bcm63xx_ep_disable,
1294
1295 .alloc_request = bcm63xx_udc_alloc_request,
1296 .free_request = bcm63xx_udc_free_request,
1297
1298 .queue = bcm63xx_udc_queue,
1299 .dequeue = bcm63xx_udc_dequeue,
1300
1301 .set_halt = bcm63xx_udc_set_halt,
1302 .set_wedge = bcm63xx_udc_set_wedge,
1303};
1304
1305/***********************************************************************
1306 * EP0 handling
1307 ***********************************************************************/
1308
1309/**
1310 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1311 * @udc: Reference to the device controller.
1312 * @ctrl: 8-byte SETUP request.
1313 */
1314static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1315 struct usb_ctrlrequest *ctrl)
1316{
1317 int rc;
1318
1319 spin_unlock_irq(&udc->lock);
1320 rc = udc->driver->setup(&udc->gadget, ctrl);
1321 spin_lock_irq(&udc->lock);
1322 return rc;
1323}
1324
1325/**
1326 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1327 * @udc: Reference to the device controller.
1328 *
1329 * Many standard requests are handled automatically in the hardware, but
1330 * we still need to pass them to the gadget driver so that it can
1331 * reconfigure the interfaces/endpoints if necessary.
1332 *
1333 * Unfortunately we are not able to send a STALL response if the host
1334 * requests an invalid configuration. If this happens, we'll have to be
1335 * content with printing a warning.
1336 */
1337static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1338{
1339 struct usb_ctrlrequest ctrl;
1340 int rc;
1341
1342 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1343 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1344 ctrl.wValue = cpu_to_le16(udc->cfg);
1345 ctrl.wIndex = 0;
1346 ctrl.wLength = 0;
1347
1348 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1349 if (rc < 0) {
1350 dev_warn_ratelimited(udc->dev,
1351 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1352 udc->cfg);
1353 }
1354 return rc;
1355}
1356
1357/**
1358 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1359 * @udc: Reference to the device controller.
1360 */
1361static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1362{
1363 struct usb_ctrlrequest ctrl;
1364 int rc;
1365
1366 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1367 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1368 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1369 ctrl.wIndex = cpu_to_le16(udc->iface);
1370 ctrl.wLength = 0;
1371
1372 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
1373 if (rc < 0) {
1374 dev_warn_ratelimited(udc->dev,
1375 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1376 udc->iface, udc->alt_iface);
1377 }
1378 return rc;
1379}
1380
1381/**
1382 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1383 * @udc: Reference to the device controller.
1384 * @ch_idx: IUDMA channel number.
1385 * @req: USB gadget layer representation of the request.
1386 */
1387static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1388 struct usb_request *req)
1389{
1390 struct bcm63xx_req *breq = our_req(req);
1391 struct iudma_ch *iudma = &udc->iudma[ch_idx];
1392
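	/* ep0 handles at most one outstanding request at a time */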
1393 BUG_ON(udc->ep0_request);
1394 udc->ep0_request = req;
1395
1396 req->actual = 0;
1397 breq->offset = 0;
1398 usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1399 iudma_write(udc, iudma, breq);
1400}
1401
1402/**
1403 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1404 * @udc: Reference to the device controller.
1405 * @req: USB gadget layer representation of the request.
1406 * @status: Status to return to the gadget driver.
1407 */
1408static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1409 struct usb_request *req, int status)
1410{
1411 req->status = status;
1412 if (status)
1413 req->actual = 0;
1414 if (req->complete) {
1415 spin_unlock_irq(&udc->lock);
1416 req->complete(&udc->bep[0].ep, req);
1417 spin_lock_irq(&udc->lock);
1418 }
1419}
1420
1421/**
1422 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1423 * reset/shutdown.
1424 * @udc: Reference to the device controller.
1425 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1426 */
1427static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1428{
1429 struct usb_request *req = udc->ep0_reply;
1430
1431 udc->ep0_reply = NULL;
1432 usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1433 if (udc->ep0_request == req) {
1434 udc->ep0_req_completed = 0;
1435 udc->ep0_request = NULL;
1436 }
1437 bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1438}
1439
1440/**
1441 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1442 * transfer len.
1443 * @udc: Reference to the device controller.
1444 */
1445static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1446{
1447 struct usb_request *req = udc->ep0_request;
1448
1449 udc->ep0_req_completed = 0;
1450 udc->ep0_request = NULL;
1451
1452 return req->actual;
1453}
1454
1455/**
1456 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1457 * @udc: Reference to the device controller.
1458 * @ch_idx: IUDMA channel number.
1459 * @length: Number of bytes to TX/RX.
1460 *
1461 * Used for simple transfers performed by the ep0 worker. This will always
1462 * use ep0_ctrl_req / ep0_ctrl_buf.
1463 */
1464static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1465 int length)
1466{
1467 struct usb_request *req = &udc->ep0_ctrl_req.req;
1468
1469 req->buf = udc->ep0_ctrl_buf;
1470 req->length = length;
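	/* no completion callback: the ep0 worker consumes the result itself */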
1471 req->complete = NULL;
1472
1473 bcm63xx_ep0_map_write(udc, ch_idx, req);
1474}
1475
1476/**
1477 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1478 * @udc: Reference to the device controller.
1479 *
1480 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
1481 * for the next packet. Anything else means the transaction requires multiple
1482 * stages of handling.
1483 */
1484static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1485{
1486 int rc;
1487 struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
1488
1489 rc = bcm63xx_ep0_read_complete(udc);
1490
1491 if (rc < 0) {
1492 dev_err(udc->dev, "missing SETUP packet\n");
1493 return EP0_IDLE;
1494 }
1495
1496 /*
	 * Handle 0-byte IN STATUS acknowledgement.  The hardware doesn't
	 * always deliver these, so if we happen to see one, just throw
	 * it away.

1500 */
1501 if (rc == 0)
1502 return EP0_REQUEUE;
1503
1504 /* Drop malformed SETUP packets */
1505 if (rc != sizeof(*ctrl)) {
1506 dev_warn_ratelimited(udc->dev,
1507 "malformed SETUP packet (%d bytes)\n", rc);
1508 return EP0_REQUEUE;
1509 }
1510
1511 /* Process new SETUP packet arriving on ep0 */
1512 rc = bcm63xx_ep0_setup_callback(udc, ctrl);
1513 if (rc < 0) {
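		/* the gadget driver rejected this SETUP packet: reply with STALL */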
1514 bcm63xx_set_stall(udc, &udc->bep[0], true);
1515 return EP0_REQUEUE;
1516 }
1517
1518 if (!ctrl->wLength)
1519 return EP0_REQUEUE;
1520 else if (ctrl->bRequestType & USB_DIR_IN)
1521 return EP0_IN_DATA_PHASE_SETUP;
1522 else
1523 return EP0_OUT_DATA_PHASE_SETUP;
1524}
1525
1526/**
1527 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1528 * @udc: Reference to the device controller.
1529 *
1530 * In state EP0_IDLE, the RX descriptor is either pending, or has been
1531 * filled with a SETUP packet from the host. This function handles new
1532 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1533 * and reset/shutdown events.
1534 *
1535 * Returns 0 if work was done; -EAGAIN if nothing to do.
1536 */
1537static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
1538{
1539 if (udc->ep0_req_reset) {
1540 udc->ep0_req_reset = 0;
1541 } else if (udc->ep0_req_set_cfg) {
1542 udc->ep0_req_set_cfg = 0;
1543 if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1544 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1545 } else if (udc->ep0_req_set_iface) {
1546 udc->ep0_req_set_iface = 0;
1547 if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1548 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1549 } else if (udc->ep0_req_completed) {
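		/* the ep0 RX descriptor was filled: process the new SETUP packet */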
1550 udc->ep0state = bcm63xx_ep0_do_setup(udc);
1551 return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1552 } else if (udc->ep0_req_shutdown) {
1553 udc->ep0_req_shutdown = 0;
1554 udc->ep0_req_completed = 0;
1555 udc->ep0_request = NULL;
1556 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1557 usb_gadget_unmap_request(&udc->gadget,
1558 &udc->ep0_ctrl_req.req, 0);
1559
1560 /* bcm63xx_udc_pullup() is waiting for this */
1561 mb();
1562 udc->ep0state = EP0_SHUTDOWN;
1563 } else if (udc->ep0_reply) {
1564 /*
1565 * This could happen if a USB RESET shows up during an ep0
1566 * transaction (especially if a laggy driver like gadgetfs
1567 * is in use).
1568 */
1569 dev_warn(udc->dev, "nuking unexpected reply\n");
1570 bcm63xx_ep0_nuke_reply(udc, 0);
1571 } else {
1572 return -EAGAIN;
1573 }
1574
1575 return 0;
1576}
1577
1578/**
1579 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1580 * @udc: Reference to the device controller.
1581 *
1582 * Returns 0 if work was done; -EAGAIN if nothing to do.
1583 */
1584static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1585{
1586 enum bcm63xx_ep0_state ep0state = udc->ep0state;
1587 bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1588
1589 switch (udc->ep0state) {
1590 case EP0_REQUEUE:
1591 /* set up descriptor to receive SETUP packet */
1592 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1593 BCM63XX_MAX_CTRL_PKT);
1594 ep0state = EP0_IDLE;
1595 break;
1596 case EP0_IDLE:
1597 return bcm63xx_ep0_do_idle(udc);
1598 case EP0_IN_DATA_PHASE_SETUP:
1599 /*
1600 * Normal case: TX request is in ep0_reply (queued by the
1601 * callback), or will be queued shortly. When it's here,
1602 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1603 *
1604 * Shutdown case: Stop waiting for the reply. Just
1605 * REQUEUE->IDLE. The gadget driver is NOT expected to
1606 * queue anything else now.
1607 */
1608 if (udc->ep0_reply) {
1609 bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1610 udc->ep0_reply);
1611 ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1612 } else if (shutdown) {
1613 ep0state = EP0_REQUEUE;
1614 }
1615 break;
1616 case EP0_IN_DATA_PHASE_COMPLETE: {
1617 /*
1618 * Normal case: TX packet (ep0_reply) is in flight; wait for
1619 * it to finish, then go back to REQUEUE->IDLE.
1620 *
1621 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1622 * completion to the gadget driver, then REQUEUE->IDLE.
1623 */
1624 if (udc->ep0_req_completed) {
1625 udc->ep0_reply = NULL;
1626 bcm63xx_ep0_read_complete(udc);
1627 /*
1628 * the "ack" sometimes gets eaten (see
1629 * bcm63xx_ep0_do_idle)
1630 */
1631 ep0state = EP0_REQUEUE;
1632 } else if (shutdown) {
1633 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1634 bcm63xx_ep0_nuke_reply(udc, 1);
1635 ep0state = EP0_REQUEUE;
1636 }
1637 break;
1638 }
1639 case EP0_OUT_DATA_PHASE_SETUP:
1640 /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1641 if (udc->ep0_reply) {
1642 bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1643 udc->ep0_reply);
1644 ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1645 } else if (shutdown) {
1646 ep0state = EP0_REQUEUE;
1647 }
1648 break;
1649 case EP0_OUT_DATA_PHASE_COMPLETE: {
1650 /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1651 if (udc->ep0_req_completed) {
1652 udc->ep0_reply = NULL;
1653 bcm63xx_ep0_read_complete(udc);
1654
1655 /* send 0-byte ack to host */
1656 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1657 ep0state = EP0_OUT_STATUS_PHASE;
1658 } else if (shutdown) {
1659 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1660 bcm63xx_ep0_nuke_reply(udc, 0);
1661 ep0state = EP0_REQUEUE;
1662 }
1663 break;
1664 }
1665 case EP0_OUT_STATUS_PHASE:
1666 /*
1667 * Normal case: 0-byte OUT ack packet is in flight; wait
1668 * for it to finish, then go back to REQUEUE->IDLE.
1669 *
1670 * Shutdown case: just cancel the transmission. Don't bother
1671 * calling the completion, because it originated from this
1672 * function anyway. Then go back to REQUEUE->IDLE.
1673 */
1674 if (udc->ep0_req_completed) {
1675 bcm63xx_ep0_read_complete(udc);
1676 ep0state = EP0_REQUEUE;
1677 } else if (shutdown) {
1678 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1679 udc->ep0_request = NULL;
1680 ep0state = EP0_REQUEUE;
1681 }
1682 break;
1683 case EP0_IN_FAKE_STATUS_PHASE: {
1684 /*
1685 * Normal case: we spoofed a SETUP packet and are now
1686 * waiting for the gadget driver to send a 0-byte reply.
1687 * This doesn't actually get sent to the HW because the
1688 * HW has already sent its own reply. Once we get the
1689 * response, return to IDLE.
1690 *
1691 * Shutdown case: return to IDLE immediately.
1692 *
1693 * Note that the ep0 RX descriptor has remained queued
1694 * (and possibly unfilled) during this entire transaction.
1695 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1696 * or SET_INTERFACE transactions.
1697 */
1698 struct usb_request *r = udc->ep0_reply;
1699
1700 if (!r) {
1701 if (shutdown)
1702 ep0state = EP0_IDLE;
1703 break;
1704 }
1705
1706 bcm63xx_ep0_complete(udc, r, 0);
1707 udc->ep0_reply = NULL;
1708 ep0state = EP0_IDLE;
1709 break;
1710 }
1711 case EP0_SHUTDOWN:
1712 break;
1713 }
1714
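	/* an unchanged state means this round did no work */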
1715 if (udc->ep0state == ep0state)
1716 return -EAGAIN;
1717
1718 udc->ep0state = ep0state;
1719 return 0;
1720}
1721
1722/**
1723 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Work item embedded in the device controller struct.
1725 *
1726 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
1727 * is used to synchronize ep0 events and ensure that both HW and SW events
1728 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
1729 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1730 * by the USBD hardware.
1731 *
1732 * The worker function will continue iterating around the state machine
1733 * until there is nothing left to do. Usually "nothing left to do" means
1734 * that we're waiting for a new event from the hardware.
1735 */
1736static void bcm63xx_ep0_process(struct work_struct *w)
1737{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);

	spin_lock_irq(&udc->lock);
1740 while (bcm63xx_ep0_one_round(udc) == 0)
1741 ;
1742 spin_unlock_irq(&udc->lock);
1743}
1744
1745/***********************************************************************
1746 * Standard UDC gadget operations
1747 ***********************************************************************/
1748
1749/**
1750 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1751 * @gadget: USB slave device.
1752 */
1753static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1754{
1755 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1756
1757 return (usbd_readl(udc, USBD_STATUS_REG) &
1758 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1759}
1760
1761/**
1762 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1763 * @gadget: USB slave device.
1764 * @is_on: 0 to disable pullup, 1 to enable.
1765 *
1766 * See notes in bcm63xx_select_pullup().
1767 */
1768static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1769{
1770 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1771 unsigned long flags;
1772 int i, rc = -EINVAL;
1773
1774 spin_lock_irqsave(&udc->lock, flags);
1775 if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1776 udc->gadget.speed = USB_SPEED_UNKNOWN;
1777 udc->ep0state = EP0_REQUEUE;
1778 bcm63xx_fifo_setup(udc);
1779 bcm63xx_fifo_reset(udc);
1780 bcm63xx_ep_setup(udc);
1781
1782 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1783 for (i = 0; i < BCM63XX_NUM_EP; i++)
1784 bcm63xx_set_stall(udc, &udc->bep[i], false);
1785
1786 bcm63xx_set_ctrl_irqs(udc, true);
1787 bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1788 rc = 0;
1789 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1790 bcm63xx_select_pullup(gadget_to_udc(gadget), false);
1791
1792 udc->ep0_req_shutdown = 1;
1793 spin_unlock_irqrestore(&udc->lock, flags);
1794
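		/* poll until the ep0 worker parks itself in EP0_SHUTDOWN */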
1795 while (1) {
1796 schedule_work(&udc->ep0_wq);
1797 if (udc->ep0state == EP0_SHUTDOWN)
1798 break;
1799 msleep(50);
1800 }
1801 bcm63xx_set_ctrl_irqs(udc, false);
1802 cancel_work_sync(&udc->ep0_wq);
1803 return 0;
1804 }
1805
1806 spin_unlock_irqrestore(&udc->lock, flags);
1807 return rc;
1808}
1809
1810/**
1811 * bcm63xx_udc_start - Start the controller.
1812 * @gadget: USB slave device.
1813 * @driver: Driver for USB slave devices.
1814 */
1815static int bcm63xx_udc_start(struct usb_gadget *gadget,
1816 struct usb_gadget_driver *driver)
1817{
1818 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1819 unsigned long flags;
1820
1821 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1822 !driver->setup)
1823 return -EINVAL;
1824 if (!udc)
1825 return -ENODEV;
1826 if (udc->driver)
1827 return -EBUSY;
1828
1829 spin_lock_irqsave(&udc->lock, flags);
1830
1831 set_clocks(udc, true);
1832 bcm63xx_fifo_setup(udc);
1833 bcm63xx_ep_init(udc);
1834 bcm63xx_ep_setup(udc);
1835 bcm63xx_fifo_reset(udc);
1836 bcm63xx_select_phy_mode(udc, true);
1837
1838 udc->driver = driver;
1839 driver->driver.bus = NULL;
1840 udc->gadget.dev.of_node = udc->dev->of_node;
1841
1842 spin_unlock_irqrestore(&udc->lock, flags);
1843
1844 return 0;
1845}
1846
1847/**
1848 * bcm63xx_udc_stop - Shut down the controller.
1849 * @gadget: USB slave device.
1851 */
1852static int bcm63xx_udc_stop(struct usb_gadget *gadget)
1853{
1854 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1855 unsigned long flags;
1856
1857 spin_lock_irqsave(&udc->lock, flags);
1858
1859 udc->driver = NULL;
1860
1861 /*
1862 * If we switch the PHY too abruptly after dropping D+, the host
1863 * will often complain:
1864 *
1865 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1866 */
1867 msleep(100);
1868
1869 bcm63xx_select_phy_mode(udc, false);
1870 set_clocks(udc, false);
1871
1872 spin_unlock_irqrestore(&udc->lock, flags);
1873
1874 return 0;
1875}
1876
1877static const struct usb_gadget_ops bcm63xx_udc_ops = {
1878 .get_frame = bcm63xx_udc_get_frame,
1879 .pullup = bcm63xx_udc_pullup,
1880 .udc_start = bcm63xx_udc_start,
1881 .udc_stop = bcm63xx_udc_stop,
1882};
1883
1884/***********************************************************************
1885 * IRQ handling
1886 ***********************************************************************/
1887
1888/**
1889 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1890 * @udc: Reference to the device controller.
1891 *
1892 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1893 * The driver never sees the raw control packets coming in on the ep0
1894 * IUDMA channel, but at least we get an interrupt event to tell us that
1895 * new values are waiting in the USBD_STATUS register.
1896 */
1897static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1898{
1899 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1900
1901 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1902 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1903 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1904 USBD_STATUS_ALTINTF_SHIFT;
1905 bcm63xx_ep_setup(udc);
1906}
1907
1908/**
1909 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1910 * @udc: Reference to the device controller.
1911 *
1912 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
1913 * speed has changed, so that the caller can update the endpoint settings.
1914 */
1915static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1916{
1917 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1918 enum usb_device_speed oldspeed = udc->gadget.speed;
1919
1920 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1921 case BCM63XX_SPD_HIGH:
1922 udc->gadget.speed = USB_SPEED_HIGH;
1923 break;
1924 case BCM63XX_SPD_FULL:
1925 udc->gadget.speed = USB_SPEED_FULL;
1926 break;
1927 default:
1928 /* this should never happen */
1929 udc->gadget.speed = USB_SPEED_UNKNOWN;
1930 dev_err(udc->dev,
1931 "received SETUP packet with invalid link speed\n");
1932 return 0;
1933 }
1934
1935 if (udc->gadget.speed != oldspeed) {
1936 dev_info(udc->dev, "link up, %s-speed mode\n",
1937 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1938 return 1;
1939 } else {
1940 return 0;
1941 }
1942}
1943
1944/**
1945 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1946 * @udc: Reference to the device controller.
1947 * @new_status: true to "refresh" wedge status; false to clear it.
1948 *
1949 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1950 * because the controller hardware is designed to automatically clear
1951 * stalls in response to a CLEAR_FEATURE request from the host.
1952 *
1953 * On a RESET interrupt, we do want to restore all wedged endpoints.
1954 */
1955static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1956{
1957 int i;
1958
1959 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1960 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1961 if (!new_status)
1962 clear_bit(i, &udc->wedgemap);
1963 }
1964}
1965
1966/**
1967 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1968 * @irq: IRQ number (unused).
1969 * @dev_id: Reference to the device controller.
1970 *
1971 * This is where we handle link (VBUS) down, USB reset, speed changes,
1972 * SET_CONFIGURATION, and SET_INTERFACE events.
1973 */
1974static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1975{
1976 struct bcm63xx_udc *udc = dev_id;
1977 u32 stat;
1978 bool disconnected = false, bus_reset = false;
1979
1980 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1981 usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1982
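	/* ack all events that we are about to handle */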
1983 usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1984
1985 spin_lock(&udc->lock);
1986 if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1987 /* VBUS toggled */
1988
1989 if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1990 USBD_EVENTS_USB_LINK_MASK) &&
1991 udc->gadget.speed != USB_SPEED_UNKNOWN)
1992 dev_info(udc->dev, "link down\n");
1993
1994 udc->gadget.speed = USB_SPEED_UNKNOWN;
1995 disconnected = true;
1996 }
1997 if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
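		/* bus reset: reinitialize FIFOs/endpoints and notify the ep0 worker */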
1998 bcm63xx_fifo_setup(udc);
1999 bcm63xx_fifo_reset(udc);
2000 bcm63xx_ep_setup(udc);
2001
2002 bcm63xx_update_wedge(udc, false);
2003
2004 udc->ep0_req_reset = 1;
2005 schedule_work(&udc->ep0_wq);
2006 bus_reset = true;
2007 }
2008 if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2009 if (bcm63xx_update_link_speed(udc)) {
2010 bcm63xx_fifo_setup(udc);
2011 bcm63xx_ep_setup(udc);
2012 }
2013 bcm63xx_update_wedge(udc, true);
2014 }
2015 if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2016 bcm63xx_update_cfg_iface(udc);
2017 udc->ep0_req_set_cfg = 1;
2018 schedule_work(&udc->ep0_wq);
2019 }
2020 if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2021 bcm63xx_update_cfg_iface(udc);
2022 udc->ep0_req_set_iface = 1;
2023 schedule_work(&udc->ep0_wq);
2024 }
2025 spin_unlock(&udc->lock);
2026
2027 if (disconnected && udc->driver)
2028 udc->driver->disconnect(&udc->gadget);
2029 else if (bus_reset && udc->driver)
2030 usb_gadget_udc_reset(&udc->gadget, udc->driver);
2031
2032 return IRQ_HANDLED;
2033}
2034
2035/**
2036 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2037 * @irq: IRQ number (unused).
2038 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2039 *
2040 * For the two ep0 channels, we have special handling that triggers the
2041 * ep0 worker thread. For normal bulk/intr channels, either queue up
 * the next buffer descriptor (incomplete transaction) or invoke the
 * completion callback (completed transaction).
2044 */
2045static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2046{
2047 struct iudma_ch *iudma = dev_id;
2048 struct bcm63xx_udc *udc = iudma->udc;
2049 struct bcm63xx_ep *bep;
2050 struct usb_request *req = NULL;
2051 struct bcm63xx_req *breq = NULL;
2052 int rc;
2053 bool is_done = false;
2054
2055 spin_lock(&udc->lock);
2056
2057 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2058 ENETDMAC_IR_REG, iudma->ch_idx);
2059 bep = iudma->bep;
2060 rc = iudma_read(udc, iudma);
2061
2062 /* special handling for EP0 RX (0) and TX (1) */
2063 if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2064 iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2065 req = udc->ep0_request;
2066 breq = our_req(req);
2067
2068 /* a single request could require multiple submissions */
2069 if (rc >= 0) {
2070 req->actual += rc;
2071
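			/* done if the buffer filled up or the HW returned a short packet */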
2072 if (req->actual >= req->length || breq->bd_bytes > rc) {
2073 udc->ep0_req_completed = 1;
2074 is_done = true;
2075 schedule_work(&udc->ep0_wq);
2076
2077 /* "actual" on a ZLP is 1 byte */
2078 req->actual = min(req->actual, req->length);
2079 } else {
2080 /* queue up the next BD (same request) */
2081 iudma_write(udc, iudma, breq);
2082 }
2083 }
2084 } else if (!list_empty(&bep->queue)) {
2085 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2086 req = &breq->req;
2087
2088 if (rc >= 0) {
2089 req->actual += rc;
2090
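			/* same completion test as the ep0 path above */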
2091 if (req->actual >= req->length || breq->bd_bytes > rc) {
2092 is_done = true;
2093 list_del(&breq->queue);
2094
2095 req->actual = min(req->actual, req->length);
2096
2097 if (!list_empty(&bep->queue)) {
2098 struct bcm63xx_req *next;
2099
2100 next = list_first_entry(&bep->queue,
2101 struct bcm63xx_req, queue);
2102 iudma_write(udc, iudma, next);
2103 }
2104 } else {
2105 iudma_write(udc, iudma, breq);
2106 }
2107 }
2108 }
2109 spin_unlock(&udc->lock);
2110
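	/* unmap and complete outside the lock; the callback may queue again */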
2111 if (is_done) {
2112 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2113 if (req->complete)
2114 req->complete(&bep->ep, req);
2115 }
2116
2117 return IRQ_HANDLED;
2118}
2119
2120/***********************************************************************
2121 * Debug filesystem
2122 ***********************************************************************/
2123
2124/*
2125 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2126 * @s: seq_file to which the information will be written.
2127 * @p: Unused.
2128 *
2129 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2130 */
2131static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2132{
2133 struct bcm63xx_udc *udc = s->private;
2134
2135 if (!udc->driver)
2136 return -ENODEV;
2137
2138 seq_printf(s, "ep0 state: %s\n",
2139 bcm63xx_ep0_state_names[udc->ep0state]);
2140 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2141 udc->ep0_req_reset ? "reset " : "",
2142 udc->ep0_req_set_cfg ? "set_cfg " : "",
2143 udc->ep0_req_set_iface ? "set_iface " : "",
2144 udc->ep0_req_shutdown ? "shutdown " : "",
2145 udc->ep0_request ? "pending " : "",
2146 udc->ep0_req_completed ? "completed " : "",
2147 udc->ep0_reply ? "reply " : "");
2148 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2149 udc->cfg, udc->iface, udc->alt_iface);
2150 seq_printf(s, "regs:\n");
2151 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2152 usbd_readl(udc, USBD_CONTROL_REG),
2153 usbd_readl(udc, USBD_STRAPS_REG),
2154 usbd_readl(udc, USBD_STATUS_REG));
2155 seq_printf(s, " events: %08x; stall: %08x\n",
2156 usbd_readl(udc, USBD_EVENTS_REG),
2157 usbd_readl(udc, USBD_STALL_REG));
2158
2159 return 0;
2160}
2161DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
2162
2163/*
2164 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2165 * @s: seq_file to which the information will be written.
2166 * @p: Unused.
2167 *
2168 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2169 */
2170static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2171{
2172 struct bcm63xx_udc *udc = s->private;
2173 int ch_idx, i;
2174 u32 sram2, sram3;
2175
2176 if (!udc->driver)
2177 return -ENODEV;
2178
2179 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2180 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2181 struct list_head *pos;
2182
2183 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2184 switch (iudma_defaults[ch_idx].ep_type) {
2185 case BCMEP_CTRL:
2186 seq_printf(s, "control");
2187 break;
2188 case BCMEP_BULK:
2189 seq_printf(s, "bulk");
2190 break;
2191 case BCMEP_INTR:
2192 seq_printf(s, "interrupt");
2193 break;
2194 }
2195 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2196 seq_printf(s, " [ep%d]:\n",
2197 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2198 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2199 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2200 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2201 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2202 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2203
2204 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2205 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2206 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2207 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2208 sram2 >> 16, sram2 & 0xffff,
2209 sram3 >> 16, sram3 & 0xffff,
2210 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2211 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2212 iudma->n_bds);
2213
2214 if (iudma->bep) {
2215 i = 0;
2216 list_for_each(pos, &iudma->bep->queue)
2217 i++;
2218 seq_printf(s, "; %d queued\n", i);
2219 } else {
2220 seq_printf(s, "\n");
2221 }
2222
2223 for (i = 0; i < iudma->n_bds; i++) {
2224 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2225
2226 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2227 i * sizeof(*d), i,
2228 d->len_stat >> 16, d->len_stat & 0xffff,
2229 d->address);
2230 if (d == iudma->read_bd)
2231 seq_printf(s, " <<RD");
2232 if (d == iudma->write_bd)
2233 seq_printf(s, " <<WR");
2234 seq_printf(s, "\n");
2235 }
2236
2237 seq_printf(s, "\n");
2238 }
2239
2240 return 0;
2241}
2242DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);
2243
2244/**
2245 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2246 * @udc: Reference to the device controller.
2247 */
2248static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2249{
2250 struct dentry *root, *usbd, *iudma;
2251
2252 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2253 return;
2254
2255 root = debugfs_create_dir(udc->gadget.name, NULL);
2256 if (IS_ERR(root) || !root)
2257 goto err_root;
2258
2259 usbd = debugfs_create_file("usbd", 0400, root, udc,
2260 &bcm63xx_usbd_dbg_fops);
2261 if (!usbd)
2262 goto err_usbd;
2263 iudma = debugfs_create_file("iudma", 0400, root, udc,
2264 &bcm63xx_iudma_dbg_fops);
2265 if (!iudma)
2266 goto err_iudma;
2267
2268 udc->debugfs_root = root;
2269 udc->debugfs_usbd = usbd;
2270 udc->debugfs_iudma = iudma;
2271 return;
2272err_iudma:
2273 debugfs_remove(usbd);
2274err_usbd:
2275 debugfs_remove(root);
2276err_root:
2277 dev_err(udc->dev, "debugfs is not available\n");
2278}
2279
2280/**
2281 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2282 * @udc: Reference to the device controller.
2283 *
2284 * debugfs_remove() is safe to call with a NULL argument.
2285 */
2286static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2287{
2288 debugfs_remove(udc->debugfs_iudma);
2289 debugfs_remove(udc->debugfs_usbd);
2290 debugfs_remove(udc->debugfs_root);
2291 udc->debugfs_iudma = NULL;
2292 udc->debugfs_usbd = NULL;
2293 udc->debugfs_root = NULL;
2294}
2295
2296/***********************************************************************
2297 * Driver init/exit
2298 ***********************************************************************/
2299
2300/**
2301 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2302 * @pdev: Platform device struct from the bcm63xx BSP code.
2303 *
2304 * Note that platform data is required, because pd.port_no varies from chip
2305 * to chip and is used to switch the correct USB port to device mode.
2306 */
2307static int bcm63xx_udc_probe(struct platform_device *pdev)
2308{
2309 struct device *dev = &pdev->dev;
2310 struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
2311 struct bcm63xx_udc *udc;
2312 struct resource *res;
2313 int rc = -ENOMEM, i, irq;
2314
2315 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2316 if (!udc)
2317 return -ENOMEM;
2318
2319 platform_set_drvdata(pdev, udc);
2320 udc->dev = dev;
2321 udc->pd = pd;
2322
2323 if (!pd) {
2324 dev_err(dev, "missing platform data\n");
2325 return -EINVAL;
2326 }
2327
2328 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2329 udc->usbd_regs = devm_ioremap_resource(dev, res);
2330 if (IS_ERR(udc->usbd_regs))
2331 return PTR_ERR(udc->usbd_regs);
2332
2333 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2334 udc->iudma_regs = devm_ioremap_resource(dev, res);
2335 if (IS_ERR(udc->iudma_regs))
2336 return PTR_ERR(udc->iudma_regs);
2337
2338 spin_lock_init(&udc->lock);
2339 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2340
2341 udc->gadget.ops = &bcm63xx_udc_ops;
2342 udc->gadget.name = dev_name(dev);
2343
2344 if (!pd->use_fullspeed && !use_fullspeed)
2345 udc->gadget.max_speed = USB_SPEED_HIGH;
2346 else
2347 udc->gadget.max_speed = USB_SPEED_FULL;
2348
2349 /* request clocks, allocate buffers, and clear any pending IRQs */
2350 rc = bcm63xx_init_udc_hw(udc);
2351 if (rc)
2352 return rc;
2353
2354 rc = -ENXIO;
2355
2356 /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2357 irq = platform_get_irq(pdev, 0);
2358 if (irq < 0) {
2359 dev_err(dev, "missing IRQ resource #0\n");
2360 goto out_uninit;
2361 }
2362 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2363 dev_name(dev), udc) < 0)
2364 goto report_request_failure;
2365
2366 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2367 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2368 irq = platform_get_irq(pdev, i + 1);
2369 if (irq < 0) {
2370 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2371 goto out_uninit;
2372 }
2373 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2374 dev_name(dev), &udc->iudma[i]) < 0)
2375 goto report_request_failure;
2376 }
2377
2378 bcm63xx_udc_init_debugfs(udc);
2379 rc = usb_add_gadget_udc(dev, &udc->gadget);
2380 if (!rc)
2381 return 0;
2382
2383 bcm63xx_udc_cleanup_debugfs(udc);
2384out_uninit:
2385 bcm63xx_uninit_udc_hw(udc);
2386 return rc;
2387
2388report_request_failure:
2389 dev_err(dev, "error requesting IRQ #%d\n", irq);
2390 goto out_uninit;
2391}
2392
2393/**
2394 * bcm63xx_udc_remove - Remove the device from the system.
2395 * @pdev: Platform device struct from the bcm63xx BSP code.
2396 */
2397static int bcm63xx_udc_remove(struct platform_device *pdev)
2398{
2399 struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2400
2401 bcm63xx_udc_cleanup_debugfs(udc);
2402 usb_del_gadget_udc(&udc->gadget);
2403 BUG_ON(udc->driver);
2404
2405 bcm63xx_uninit_udc_hw(udc);
2406
2407 return 0;
2408}
2409
2410static struct platform_driver bcm63xx_udc_driver = {
2411 .probe = bcm63xx_udc_probe,
2412 .remove = bcm63xx_udc_remove,
2413 .driver = {
2414 .name = DRV_MODULE_NAME,
2415 },
2416};
2417module_platform_driver(bcm63xx_udc_driver);
2418
2419MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2420MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2421MODULE_LICENSE("GPL");
2422MODULE_ALIAS("platform:" DRV_MODULE_NAME);